intel_bufmgr_gem.c revision 2e6867f6
1/**************************************************************************
2 *
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
26 * of the Software.
27 *
28 *
29 **************************************************************************/
30/*
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 *	    Eric Anholt <eric@anholt.net>
34 *	    Dave Airlie <airlied@linux.ie>
35 */
36
37#ifdef HAVE_CONFIG_H
38#include "config.h"
39#endif
40
41#include <xf86drm.h>
42#include <xf86atomic.h>
43#include <fcntl.h>
44#include <stdio.h>
45#include <stdlib.h>
46#include <string.h>
47#include <unistd.h>
48#include <assert.h>
49#include <pthread.h>
50#include <stddef.h>
51#include <sys/ioctl.h>
52#include <sys/mman.h>
53#include <sys/stat.h>
54#include <sys/types.h>
55
56#include "errno.h"
57#include "libdrm_lists.h"
58#include "intel_bufmgr.h"
59#include "intel_bufmgr_priv.h"
60#include "intel_chipset.h"
61#include "string.h"
62
63#include "i915_drm.h"
64
65#define DBG(...) do {					\
66	if (bufmgr_gem->bufmgr.debug)			\
67		fprintf(stderr, __VA_ARGS__);		\
68} while (0)
69
70typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
71
72struct drm_intel_gem_bo_bucket {
73	drmMMListHead head;
74	unsigned long size;
75};
76
77/* Only cache objects up to 64MB.  Beyond that, the power-of-two rounding of
78 * the size makes many operations fail that otherwise wouldn't.
79 */
80#define DRM_INTEL_GEM_BO_BUCKETS	14
81typedef struct _drm_intel_bufmgr_gem {
82	drm_intel_bufmgr bufmgr;
83
84	int fd;
85
86	int max_relocs;
87
88	pthread_mutex_t lock;
89
90	struct drm_i915_gem_exec_object *exec_objects;
91	struct drm_i915_gem_exec_object2 *exec2_objects;
92	drm_intel_bo **exec_bos;
93	int exec_size;
94	int exec_count;
95
96	/** Array of lists of cached gem objects of power-of-two sizes */
97	struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];
98
99	uint64_t gtt_size;
100	int available_fences;
101	int pci_device;
102	int gen;
103	char bo_reuse;
104	char fenced_relocs;
105} drm_intel_bufmgr_gem;
106
107#define DRM_INTEL_RELOC_FENCE (1<<0)
108
109typedef struct _drm_intel_reloc_target_info {
110	drm_intel_bo *bo;
111	int flags;
112} drm_intel_reloc_target;
113
114struct _drm_intel_bo_gem {
115	drm_intel_bo bo;
116
117	atomic_t refcount;
118	uint32_t gem_handle;
119	const char *name;
120
121	/**
122	 * Kernel-assigned global name for this object
123	 */
124	unsigned int global_name;
125
126	/**
127	 * Index of the buffer within the validation list while preparing a
128	 * batchbuffer execution.
129	 */
130	int validate_index;
131
132	/**
133	 * Current tiling mode
134	 */
135	uint32_t tiling_mode;
136	uint32_t swizzle_mode;
137
138	time_t free_time;
139
140	/** Array passed to the DRM containing relocation information. */
141	struct drm_i915_gem_relocation_entry *relocs;
142	/**
143	 * Array of info structs corresponding to relocs[i].target_handle etc
144	 */
145	drm_intel_reloc_target *reloc_target_info;
146	/** Number of entries in relocs */
147	int reloc_count;
148	/** Mapped address for the buffer, saved across map/unmap cycles */
149	void *mem_virtual;
150	/** GTT virtual address for the buffer, saved across map/unmap cycles */
151	void *gtt_virtual;
152
153	/** BO cache list */
154	drmMMListHead head;
155
156	/**
157	 * Boolean of whether this BO and its children have been included in
158	 * the current drm_intel_bufmgr_check_aperture_space() total.
159	 */
160	char included_in_check_aperture;
161
162	/**
163	 * Boolean of whether this buffer has been used as a relocation
164	 * target and had its size accounted for, and thus can't have any
165	 * further relocations added to it.
166	 */
167	char used_as_reloc_target;
168
169	/**
170	 * Boolean of whether we have encountered an error whilst building the relocation tree.
171	 */
172	char has_error;
173
174	/**
175	 * Boolean of whether this buffer can be re-used
176	 */
177	char reusable;
178
179	/**
180	 * Size in bytes of this buffer and its relocation descendants.
181	 *
182	 * Used to avoid costly tree walking in
183	 * drm_intel_bufmgr_check_aperture in the common case.
184	 */
185	int reloc_tree_size;
186
187	/**
188	 * Number of potential fence registers required by this buffer and its
189	 * relocations.
190	 */
191	int reloc_tree_fences;
192};
193
194static unsigned int
195drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
196
197static unsigned int
198drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
199
200static int
201drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
202			    uint32_t * swizzle_mode);
203
204static int
205drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
206			    uint32_t stride);
207
208static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
209						      time_t time);
210
211static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
212
213static void drm_intel_gem_bo_free(drm_intel_bo *bo);
214
215static unsigned long
216drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
217			   uint32_t *tiling_mode)
218{
219	unsigned long min_size, max_size;
220	unsigned long i;
221
222	if (*tiling_mode == I915_TILING_NONE)
223		return size;
224
225	/* 965+ just need multiples of page size for tiling */
226	if (bufmgr_gem->gen >= 4)
227		return ROUND_UP_TO(size, 4096);
228
229	/* Older chips need powers of two, at least 512KB (gen2) or 1MB (gen3) */
230	if (bufmgr_gem->gen == 3) {
231		min_size = 1024*1024;
232		max_size = 128*1024*1024;
233	} else {
234		min_size = 512*1024;
235		max_size = 64*1024*1024;
236	}
237
238	if (size > max_size) {
239		*tiling_mode = I915_TILING_NONE;
240		return size;
241	}
242
243	for (i = min_size; i < size; i <<= 1)
244		;
245
246	return i;
247}
248
249/*
250 * Round a given pitch up to the minimum required for X tiling on a
251 * given chip.  We use 512 as the minimum to allow for a later tiling
252 * change.
253 */
254static unsigned long
255drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
256			    unsigned long pitch, uint32_t tiling_mode)
257{
258	unsigned long tile_width;
259	unsigned long i;
260
261	/* If untiled, then just align it so that we can do rendering
262	 * to it with the 3D engine.
263	 */
264	if (tiling_mode == I915_TILING_NONE)
265		return ALIGN(pitch, 64);
266
267	if (tiling_mode == I915_TILING_X)
268		tile_width = 512;
269	else
270		tile_width = 128;
271
272	/* 965 is flexible */
273	if (bufmgr_gem->gen >= 4)
274		return ROUND_UP_TO(pitch, tile_width);
275
276	/* Pre-965 needs power of two tile width */
277	for (i = tile_width; i < pitch; i <<= 1)
278		;
279
280	return i;
281}
282
283static struct drm_intel_gem_bo_bucket *
284drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
285				 unsigned long size)
286{
287	int i;
288
289	for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
290		struct drm_intel_gem_bo_bucket *bucket =
291		    &bufmgr_gem->cache_bucket[i];
292		if (bucket->size >= size) {
293			return bucket;
294		}
295	}
296
297	return NULL;
298}
299
300static void
301drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
302{
303	int i, j;
304
305	for (i = 0; i < bufmgr_gem->exec_count; i++) {
306		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
307		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
308
309		if (bo_gem->relocs == NULL) {
310			DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
311			    bo_gem->name);
312			continue;
313		}
314
315		for (j = 0; j < bo_gem->reloc_count; j++) {
316			drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
317			drm_intel_bo_gem *target_gem =
318			    (drm_intel_bo_gem *) target_bo;
319
320			DBG("%2d: %d (%s)@0x%08llx -> "
321			    "%d (%s)@0x%08lx + 0x%08x\n",
322			    i,
323			    bo_gem->gem_handle, bo_gem->name,
324			    (unsigned long long)bo_gem->relocs[j].offset,
325			    target_gem->gem_handle,
326			    target_gem->name,
327			    target_bo->offset,
328			    bo_gem->relocs[j].delta);
329		}
330	}
331}
332
333static inline void
334drm_intel_gem_bo_reference(drm_intel_bo *bo)
335{
336	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
337
338	assert(atomic_read(&bo_gem->refcount) > 0);
339	atomic_inc(&bo_gem->refcount);
340}
341
342/**
343 * Adds the given buffer to the list of buffers to be validated (moved into the
344 * appropriate memory type) with the next batch submission.
345 *
346 * If a buffer is validated multiple times in a batch submission, it ends up
347 * with the intersection of the memory type flags and the union of the
348 * access flags.
349 */
350static void
351drm_intel_add_validate_buffer(drm_intel_bo *bo)
352{
353	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
354	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
355	int index;
356
357	if (bo_gem->validate_index != -1)
358		return;
359
360	/* Extend the array of validation entries as necessary. */
361	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
362		int new_size = bufmgr_gem->exec_size * 2;
363
364		if (new_size == 0)
365			new_size = 5;
366
367		bufmgr_gem->exec_objects =
368		    realloc(bufmgr_gem->exec_objects,
369			    sizeof(*bufmgr_gem->exec_objects) * new_size);
370		bufmgr_gem->exec_bos =
371		    realloc(bufmgr_gem->exec_bos,
372			    sizeof(*bufmgr_gem->exec_bos) * new_size);
373		bufmgr_gem->exec_size = new_size;
374	}
375
376	index = bufmgr_gem->exec_count;
377	bo_gem->validate_index = index;
378	/* Fill in array entry */
379	bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
380	bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
381	bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
382	bufmgr_gem->exec_objects[index].alignment = 0;
383	bufmgr_gem->exec_objects[index].offset = 0;
384	bufmgr_gem->exec_bos[index] = bo;
385	bufmgr_gem->exec_count++;
386}
387
388static void
389drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
390{
391	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
392	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
393	int index;
394
395	if (bo_gem->validate_index != -1) {
396		if (need_fence)
397			bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
398				EXEC_OBJECT_NEEDS_FENCE;
399		return;
400	}
401
402	/* Extend the array of validation entries as necessary. */
403	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
404		int new_size = bufmgr_gem->exec_size * 2;
405
406		if (new_size == 0)
407			new_size = 5;
408
409		bufmgr_gem->exec2_objects =
410			realloc(bufmgr_gem->exec2_objects,
411				sizeof(*bufmgr_gem->exec2_objects) * new_size);
412		bufmgr_gem->exec_bos =
413			realloc(bufmgr_gem->exec_bos,
414				sizeof(*bufmgr_gem->exec_bos) * new_size);
415		bufmgr_gem->exec_size = new_size;
416	}
417
418	index = bufmgr_gem->exec_count;
419	bo_gem->validate_index = index;
420	/* Fill in array entry */
421	bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
422	bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
423	bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
424	bufmgr_gem->exec2_objects[index].alignment = 0;
425	bufmgr_gem->exec2_objects[index].offset = 0;
426	bufmgr_gem->exec_bos[index] = bo;
427	bufmgr_gem->exec2_objects[index].flags = 0;
428	bufmgr_gem->exec2_objects[index].rsvd1 = 0;
429	bufmgr_gem->exec2_objects[index].rsvd2 = 0;
430	if (need_fence) {
431		bufmgr_gem->exec2_objects[index].flags |=
432			EXEC_OBJECT_NEEDS_FENCE;
433	}
434	bufmgr_gem->exec_count++;
435}
436
437#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
438	sizeof(uint32_t))
439
440static void
441drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
442				      drm_intel_bo_gem *bo_gem)
443{
444	int size;
445
446	assert(!bo_gem->used_as_reloc_target);
447
448	/* The older chipsets are far less flexible in terms of tiling,
449	 * and require tiled buffers to be size-aligned in the aperture.
450	 * This means that in the worst possible case we will need a hole
451	 * twice as large as the object in order for it to fit into the
452	 * aperture. Optimal packing is for wimps.
453	 */
454	size = bo_gem->bo.size;
455	if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE)
456		size *= 2;
457
458	bo_gem->reloc_tree_size = size;
459}
460
461static int
462drm_intel_setup_reloc_list(drm_intel_bo *bo)
463{
464	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
465	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
466	unsigned int max_relocs = bufmgr_gem->max_relocs;
467
468	if (bo->size / 4 < max_relocs)
469		max_relocs = bo->size / 4;
470
471	bo_gem->relocs = malloc(max_relocs *
472				sizeof(struct drm_i915_gem_relocation_entry));
473	bo_gem->reloc_target_info = malloc(max_relocs *
474					   sizeof(drm_intel_reloc_target));
475	if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
476		bo_gem->has_error = 1;
477
478		free (bo_gem->relocs);
479		bo_gem->relocs = NULL;
480
481		free (bo_gem->reloc_target_info);
482		bo_gem->reloc_target_info = NULL;
483
484		return 1;
485	}
486
487	return 0;
488}
489
490static int
491drm_intel_gem_bo_busy(drm_intel_bo *bo)
492{
493	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
494	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
495	struct drm_i915_gem_busy busy;
496	int ret;
497
498	memset(&busy, 0, sizeof(busy));
499	busy.handle = bo_gem->gem_handle;
500
501	do {
502		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
503	} while (ret == -1 && errno == EINTR);
504
505	return (ret == 0 && busy.busy);
506}
507
508static int
509drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
510				  drm_intel_bo_gem *bo_gem, int state)
511{
512	struct drm_i915_gem_madvise madv;
513
514	madv.handle = bo_gem->gem_handle;
515	madv.madv = state;
516	madv.retained = 1;
517	ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
518
519	return madv.retained;
520}
521
522static int
523drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
524{
525	return drm_intel_gem_bo_madvise_internal
526		((drm_intel_bufmgr_gem *) bo->bufmgr,
527		 (drm_intel_bo_gem *) bo,
528		 madv);
529}
530
531/* drop the oldest entries that have been purged by the kernel */
532static void
533drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
534				    struct drm_intel_gem_bo_bucket *bucket)
535{
536	while (!DRMLISTEMPTY(&bucket->head)) {
537		drm_intel_bo_gem *bo_gem;
538
539		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
540				      bucket->head.next, head);
541		if (drm_intel_gem_bo_madvise_internal
542		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
543			break;
544
545		DRMLISTDEL(&bo_gem->head);
546		drm_intel_gem_bo_free(&bo_gem->bo);
547	}
548}
549
550static drm_intel_bo *
551drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
552				const char *name,
553				unsigned long size,
554				unsigned long flags)
555{
556	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
557	drm_intel_bo_gem *bo_gem;
558	unsigned int page_size = getpagesize();
559	int ret;
560	struct drm_intel_gem_bo_bucket *bucket;
561	int alloc_from_cache;
562	unsigned long bo_size;
563	int for_render = 0;
564
565	if (flags & BO_ALLOC_FOR_RENDER)
566		for_render = 1;
567
568	/* Round the allocated size up to a power of two number of pages. */
569	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
570
571	/* If we don't have caching at this size, don't actually round the
572	 * allocation up.
573	 */
574	if (bucket == NULL) {
575		bo_size = size;
576		if (bo_size < page_size)
577			bo_size = page_size;
578	} else {
579		bo_size = bucket->size;
580	}
581
582	pthread_mutex_lock(&bufmgr_gem->lock);
583	/* Get a buffer out of the cache if available */
584retry:
585	alloc_from_cache = 0;
586	if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
587		if (for_render) {
588			/* Allocate new render-target BOs from the tail (MRU)
589			 * of the list, as it will likely be hot in the GPU
590			 * cache and in the aperture for us.
591			 */
592			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
593					      bucket->head.prev, head);
594			DRMLISTDEL(&bo_gem->head);
595			alloc_from_cache = 1;
596		} else {
597			/* For non-render-target BOs (where we're probably
598			 * going to map it first thing in order to fill it
599			 * with data), check if the last BO in the cache is
600			 * unbusy, and only reuse in that case. Otherwise,
601			 * allocating a new buffer is probably faster than
602			 * waiting for the GPU to finish.
603			 */
604			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
605					      bucket->head.next, head);
606			if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
607				alloc_from_cache = 1;
608				DRMLISTDEL(&bo_gem->head);
609			}
610		}
611
612		if (alloc_from_cache) {
613			if (!drm_intel_gem_bo_madvise_internal
614			    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
615				drm_intel_gem_bo_free(&bo_gem->bo);
616				drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
617								    bucket);
618				goto retry;
619			}
620		}
621	}
622	pthread_mutex_unlock(&bufmgr_gem->lock);
623
624	if (!alloc_from_cache) {
625		struct drm_i915_gem_create create;
626
627		bo_gem = calloc(1, sizeof(*bo_gem));
628		if (!bo_gem)
629			return NULL;
630
631		bo_gem->bo.size = bo_size;
632		memset(&create, 0, sizeof(create));
633		create.size = bo_size;
634
635		do {
636			ret = ioctl(bufmgr_gem->fd,
637				    DRM_IOCTL_I915_GEM_CREATE,
638				    &create);
639		} while (ret == -1 && errno == EINTR);
640		bo_gem->gem_handle = create.handle;
641		bo_gem->bo.handle = bo_gem->gem_handle;
642		if (ret != 0) {
643			free(bo_gem);
644			return NULL;
645		}
646		bo_gem->bo.bufmgr = bufmgr;
647	}
648
649	bo_gem->name = name;
650	atomic_set(&bo_gem->refcount, 1);
651	bo_gem->validate_index = -1;
652	bo_gem->reloc_tree_fences = 0;
653	bo_gem->used_as_reloc_target = 0;
654	bo_gem->has_error = 0;
655	bo_gem->tiling_mode = I915_TILING_NONE;
656	bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
657	bo_gem->reusable = 1;
658
659	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
660
661	DBG("bo_create: buf %d (%s) %ldb\n",
662	    bo_gem->gem_handle, bo_gem->name, size);
663
664	return &bo_gem->bo;
665}
666
667static drm_intel_bo *
668drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
669				  const char *name,
670				  unsigned long size,
671				  unsigned int alignment)
672{
673	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
674					       BO_ALLOC_FOR_RENDER);
675}
676
677static drm_intel_bo *
678drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
679		       const char *name,
680		       unsigned long size,
681		       unsigned int alignment)
682{
683	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0);
684}
685
686static drm_intel_bo *
687drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
688			     int x, int y, int cpp, uint32_t *tiling_mode,
689			     unsigned long *pitch, unsigned long flags)
690{
691	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
692	drm_intel_bo *bo;
693	unsigned long size, stride, aligned_y = y;
694	int ret;
695
696	/* If we're tiled, our allocations are in 8 or 32-row blocks,
697	 * so failure to align our height means that we won't allocate
698	 * enough pages.
699	 *
700	 * If we're untiled, we still have to align to 2 rows high
701	 * because the data port accesses 2x2 blocks even if the
702	 * bottom row isn't to be rendered, so failure to align means
703	 * we could walk off the end of the GTT and fault.  This is
704	 * documented on 965, and may be the case on older chipsets
705	 * too so we try to be careful.
706	 */
707	if (*tiling_mode == I915_TILING_NONE)
708		aligned_y = ALIGN(y, 2);
709	else if (*tiling_mode == I915_TILING_X)
710		aligned_y = ALIGN(y, 8);
711	else if (*tiling_mode == I915_TILING_Y)
712		aligned_y = ALIGN(y, 32);
713
714	stride = x * cpp;
715	stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, *tiling_mode);
716	size = stride * aligned_y;
717	size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
718
719	bo = drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags);
720	if (!bo)
721		return NULL;
722
723	ret = drm_intel_gem_bo_set_tiling(bo, tiling_mode, stride);
724	if (ret != 0) {
725		drm_intel_gem_bo_unreference(bo);
726		return NULL;
727	}
728
729	*pitch = stride;
730
731	return bo;
732}
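
/*
 * Illustrative sketch (not part of this file's API): allocating an X-tiled
 * surface through the public drm_intel_bo_alloc_tiled() wrapper and how the
 * helpers above round it.  The "scanout" name and dimensions are arbitrary
 * examples.
 *
 *	uint32_t tiling = I915_TILING_X;
 *	unsigned long pitch;
 *	drm_intel_bo *bo;
 *
 *	bo = drm_intel_bo_alloc_tiled(bufmgr, "scanout", 300, 300, 4,
 *				      &tiling, &pitch, 0);
 *
 * On a gen3 part the 1200-byte pitch rounds up to 2048 (the next power of
 * two of the 512-byte X tile width), the height aligns to 8 rows (304), and
 * the resulting 622592-byte size rounds up to the 1MB minimum tiled size
 * from drm_intel_gem_bo_tile_size().  Callers should re-check tiling on
 * return, since it can fall back to I915_TILING_NONE for oversized objects.
 */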
733
734/**
735 * Returns a drm_intel_bo wrapping the given buffer object handle.
736 *
737 * This can be used when one application needs to pass a buffer object
738 * to another.
739 */
740drm_intel_bo *
741drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
742				  const char *name,
743				  unsigned int handle)
744{
745	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
746	drm_intel_bo_gem *bo_gem;
747	int ret;
748	struct drm_gem_open open_arg;
749	struct drm_i915_gem_get_tiling get_tiling;
750
751	bo_gem = calloc(1, sizeof(*bo_gem));
752	if (!bo_gem)
753		return NULL;
754
755	memset(&open_arg, 0, sizeof(open_arg));
756	open_arg.name = handle;
757	do {
758		ret = ioctl(bufmgr_gem->fd,
759			    DRM_IOCTL_GEM_OPEN,
760			    &open_arg);
761	} while (ret == -1 && errno == EINTR);
762	if (ret != 0) {
763		fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
764			name, handle, strerror(errno));
765		free(bo_gem);
766		return NULL;
767	}
768	bo_gem->bo.size = open_arg.size;
769	bo_gem->bo.offset = 0;
770	bo_gem->bo.virtual = NULL;
771	bo_gem->bo.bufmgr = bufmgr;
772	bo_gem->name = name;
773	atomic_set(&bo_gem->refcount, 1);
774	bo_gem->validate_index = -1;
775	bo_gem->gem_handle = open_arg.handle;
776	bo_gem->global_name = handle;
777	bo_gem->reusable = 0;
778
779	memset(&get_tiling, 0, sizeof(get_tiling));
780	get_tiling.handle = bo_gem->gem_handle;
781	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
782	if (ret != 0) {
783		drm_intel_gem_bo_unreference(&bo_gem->bo);
784		return NULL;
785	}
786	bo_gem->tiling_mode = get_tiling.tiling_mode;
787	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
788	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
789
790	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
791
792	return &bo_gem->bo;
793}
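
/*
 * Illustrative sharing sketch: the exporting process names a BO with
 * drm_intel_bo_flink() and passes the name out of band (the transport is
 * hypothetical), and the importing process wraps it with the function above.
 *
 *	// exporter
 *	uint32_t name;
 *	drm_intel_bo_flink(bo, &name);
 *	// ... send "name" to the other process ...
 *
 *	// importer (its own fd/bufmgr on the same device)
 *	drm_intel_bo *shared =
 *		drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 *
 * The imported BO is created with reusable = 0, so it is never parked in
 * the allocation cache when its last reference goes away.
 */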
794
795static void
796drm_intel_gem_bo_free(drm_intel_bo *bo)
797{
798	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
799	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
800	struct drm_gem_close close;
801	int ret;
802
803	if (bo_gem->mem_virtual)
804		munmap(bo_gem->mem_virtual, bo_gem->bo.size);
805	if (bo_gem->gtt_virtual)
806		munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
807
808	/* Close this object */
809	memset(&close, 0, sizeof(close));
810	close.handle = bo_gem->gem_handle;
811	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
812	if (ret != 0) {
813		fprintf(stderr,
814			"DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
815			bo_gem->gem_handle, bo_gem->name, strerror(errno));
816	}
817	free(bo);
818}
819
820/** Frees all cached buffers significantly older than @time. */
821static void
822drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
823{
824	int i;
825
826	for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
827		struct drm_intel_gem_bo_bucket *bucket =
828		    &bufmgr_gem->cache_bucket[i];
829
830		while (!DRMLISTEMPTY(&bucket->head)) {
831			drm_intel_bo_gem *bo_gem;
832
833			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
834					      bucket->head.next, head);
835			if (time - bo_gem->free_time <= 1)
836				break;
837
838			DRMLISTDEL(&bo_gem->head);
839
840			drm_intel_gem_bo_free(&bo_gem->bo);
841		}
842	}
843}
844
845static void
846drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
847{
848	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
849	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
850	struct drm_intel_gem_bo_bucket *bucket;
851	uint32_t tiling_mode;
852	int i;
853
854	/* Unreference all the target buffers */
855	for (i = 0; i < bo_gem->reloc_count; i++) {
856		drm_intel_gem_bo_unreference_locked_timed(bo_gem->
857							  reloc_target_info[i].bo,
858							  time);
859	}
860	bo_gem->reloc_count = 0;
861	bo_gem->used_as_reloc_target = 0;
862
863	DBG("bo_unreference final: %d (%s)\n",
864	    bo_gem->gem_handle, bo_gem->name);
865
866	/* release memory associated with this object */
867	if (bo_gem->reloc_target_info) {
868		free(bo_gem->reloc_target_info);
869		bo_gem->reloc_target_info = NULL;
870	}
871	if (bo_gem->relocs) {
872		free(bo_gem->relocs);
873		bo_gem->relocs = NULL;
874	}
875
876	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
877	/* Put the buffer into our internal cache for reuse if we can. */
878	tiling_mode = I915_TILING_NONE;
879	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
880	    drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0 &&
881	    drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
882					      I915_MADV_DONTNEED)) {
883		bo_gem->free_time = time;
884
885		bo_gem->name = NULL;
886		bo_gem->validate_index = -1;
887
888		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
889
890		drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time);
891	} else {
892		drm_intel_gem_bo_free(bo);
893	}
894}
895
896static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
897						      time_t time)
898{
899	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
900
901	assert(atomic_read(&bo_gem->refcount) > 0);
902	if (atomic_dec_and_test(&bo_gem->refcount))
903		drm_intel_gem_bo_unreference_final(bo, time);
904}
905
906static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
907{
908	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
909
910	assert(atomic_read(&bo_gem->refcount) > 0);
911	if (atomic_dec_and_test(&bo_gem->refcount)) {
912		drm_intel_bufmgr_gem *bufmgr_gem =
913		    (drm_intel_bufmgr_gem *) bo->bufmgr;
914		struct timespec time;
915
916		clock_gettime(CLOCK_MONOTONIC, &time);
917
918		pthread_mutex_lock(&bufmgr_gem->lock);
919		drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
920		pthread_mutex_unlock(&bufmgr_gem->lock);
921	}
922}
923
924static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
925{
926	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
927	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
928	struct drm_i915_gem_set_domain set_domain;
929	int ret;
930
931	pthread_mutex_lock(&bufmgr_gem->lock);
932
933	/* Allow recursive mapping. Mesa may recursively map buffers with
934	 * nested display loops.
935	 */
936	if (!bo_gem->mem_virtual) {
937		struct drm_i915_gem_mmap mmap_arg;
938
939		DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
940
941		memset(&mmap_arg, 0, sizeof(mmap_arg));
942		mmap_arg.handle = bo_gem->gem_handle;
943		mmap_arg.offset = 0;
944		mmap_arg.size = bo->size;
945		do {
946			ret = ioctl(bufmgr_gem->fd,
947				    DRM_IOCTL_I915_GEM_MMAP,
948				    &mmap_arg);
949		} while (ret == -1 && errno == EINTR);
950		if (ret != 0) {
951			ret = -errno;
952			fprintf(stderr,
953				"%s:%d: Error mapping buffer %d (%s): %s .\n",
954				__FILE__, __LINE__, bo_gem->gem_handle,
955				bo_gem->name, strerror(errno));
956			pthread_mutex_unlock(&bufmgr_gem->lock);
957			return ret;
958		}
959		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
960	}
961	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
962	    bo_gem->mem_virtual);
963	bo->virtual = bo_gem->mem_virtual;
964
965	set_domain.handle = bo_gem->gem_handle;
966	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
967	if (write_enable)
968		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
969	else
970		set_domain.write_domain = 0;
971	do {
972		ret = ioctl(bufmgr_gem->fd,
973			    DRM_IOCTL_I915_GEM_SET_DOMAIN,
974			    &set_domain);
975	} while (ret == -1 && errno == EINTR);
976	if (ret != 0) {
977		ret = -errno;
978		fprintf(stderr, "%s:%d: Error setting to CPU domain %d: %s\n",
979			__FILE__, __LINE__, bo_gem->gem_handle,
980			strerror(errno));
981		pthread_mutex_unlock(&bufmgr_gem->lock);
982		return ret;
983	}
984
985	pthread_mutex_unlock(&bufmgr_gem->lock);
986
987	return 0;
988}
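
/*
 * Minimal CPU-map usage sketch (illustrative only):
 *
 *	if (drm_intel_bo_map(bo, 1) == 0) {
 *		memset(bo->virtual, 0, bo->size);
 *		drm_intel_bo_unmap(bo);
 *	}
 *
 * The mmap is cached in mem_virtual, so repeated map/unmap cycles only pay
 * for the set_domain ioctl rather than a fresh mmap; the set_domain call
 * blocks if the GPU is still rendering to the buffer.
 */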
989
990int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
991{
992	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
993	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
994	struct drm_i915_gem_set_domain set_domain;
995	int ret;
996
997	pthread_mutex_lock(&bufmgr_gem->lock);
998
999	/* Get a mapping of the buffer if we haven't before. */
1000	if (bo_gem->gtt_virtual == NULL) {
1001		struct drm_i915_gem_mmap_gtt mmap_arg;
1002
1003		DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,
1004		    bo_gem->name);
1005
1006		memset(&mmap_arg, 0, sizeof(mmap_arg));
1007		mmap_arg.handle = bo_gem->gem_handle;
1008
1009		/* Get the fake offset back... */
1010		do {
1011			ret = ioctl(bufmgr_gem->fd,
1012				    DRM_IOCTL_I915_GEM_MMAP_GTT,
1013				    &mmap_arg);
1014		} while (ret == -1 && errno == EINTR);
1015		if (ret != 0) {
1016			ret = -errno;
1017			fprintf(stderr,
1018				"%s:%d: Error preparing buffer map %d (%s): %s .\n",
1019				__FILE__, __LINE__,
1020				bo_gem->gem_handle, bo_gem->name,
1021				strerror(errno));
1022			pthread_mutex_unlock(&bufmgr_gem->lock);
1023			return ret;
1024		}
1025
1026		/* and mmap it */
1027		bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
1028					   MAP_SHARED, bufmgr_gem->fd,
1029					   mmap_arg.offset);
1030		if (bo_gem->gtt_virtual == MAP_FAILED) {
1031			bo_gem->gtt_virtual = NULL;
1032			ret = -errno;
1033			fprintf(stderr,
1034				"%s:%d: Error mapping buffer %d (%s): %s .\n",
1035				__FILE__, __LINE__,
1036				bo_gem->gem_handle, bo_gem->name,
1037				strerror(errno));
1038			pthread_mutex_unlock(&bufmgr_gem->lock);
1039			return ret;
1040		}
1041	}
1042
1043	bo->virtual = bo_gem->gtt_virtual;
1044
1045	DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1046	    bo_gem->gtt_virtual);
1047
1048	/* Now move it to the GTT domain so that the CPU caches are flushed */
1049	set_domain.handle = bo_gem->gem_handle;
1050	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1051	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1052	do {
1053		ret = ioctl(bufmgr_gem->fd,
1054			    DRM_IOCTL_I915_GEM_SET_DOMAIN,
1055			    &set_domain);
1056	} while (ret == -1 && errno == EINTR);
1057
1058	if (ret != 0) {
1059		ret = -errno;
1060		fprintf(stderr, "%s:%d: Error setting domain %d: %s\n",
1061			__FILE__, __LINE__, bo_gem->gem_handle,
1062			strerror(errno));
1063	}
1064
1065	pthread_mutex_unlock(&bufmgr_gem->lock);
1066
1067	return ret;
1068}
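
/*
 * Illustrative sketch of the GTT-map path above, which presents tiled
 * buffers through a fence (detiled) rather than in their raw tiled layout;
 * "data" stands in for the caller's source buffer.
 *
 *	if (drm_intel_gem_bo_map_gtt(bo) == 0) {
 *		memcpy(bo->virtual, data, bo->size);
 *		drm_intel_gem_bo_unmap_gtt(bo);
 *	}
 *
 * The GTT mapping is write-combined and slow to read back, so it suits
 * streaming uploads rather than read-modify-write access.
 */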
1069
1070int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1071{
1072	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1073	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1074	int ret = 0;
1075
1076	if (bo == NULL)
1077		return 0;
1078
1079	assert(bo_gem->gtt_virtual != NULL);
1080
1081	pthread_mutex_lock(&bufmgr_gem->lock);
1082	bo->virtual = NULL;
1083	pthread_mutex_unlock(&bufmgr_gem->lock);
1084
1085	return ret;
1086}
1087
1088static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1089{
1090	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1091	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1092	struct drm_i915_gem_sw_finish sw_finish;
1093	int ret;
1094
1095	if (bo == NULL)
1096		return 0;
1097
1098	assert(bo_gem->mem_virtual != NULL);
1099
1100	pthread_mutex_lock(&bufmgr_gem->lock);
1101
1102	/* Cause a flush to happen if the buffer's pinned for scanout, so the
1103	 * results show up in a timely manner.
1104	 */
1105	sw_finish.handle = bo_gem->gem_handle;
1106	do {
1107		ret = ioctl(bufmgr_gem->fd,
1108			    DRM_IOCTL_I915_GEM_SW_FINISH,
1109			    &sw_finish);
1110	} while (ret == -1 && errno == EINTR);
1111	ret = ret == -1 ? -errno : 0;
1112
1113	bo->virtual = NULL;
1114	pthread_mutex_unlock(&bufmgr_gem->lock);
1115
1116	return ret;
1117}
1118
1119static int
1120drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1121			 unsigned long size, const void *data)
1122{
1123	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1124	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1125	struct drm_i915_gem_pwrite pwrite;
1126	int ret;
1127
1128	memset(&pwrite, 0, sizeof(pwrite));
1129	pwrite.handle = bo_gem->gem_handle;
1130	pwrite.offset = offset;
1131	pwrite.size = size;
1132	pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1133	do {
1134		ret = ioctl(bufmgr_gem->fd,
1135			    DRM_IOCTL_I915_GEM_PWRITE,
1136			    &pwrite);
1137	} while (ret == -1 && errno == EINTR);
1138	if (ret != 0) {
1139		ret = -errno;
1140		fprintf(stderr,
1141			"%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1142			__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1143			(int)size, strerror(errno));
1144	}
1145
1146	return ret;
1147}
1148
1149static int
1150drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1151{
1152	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1153	struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1154	int ret;
1155
1156	get_pipe_from_crtc_id.crtc_id = crtc_id;
1157	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1158		    &get_pipe_from_crtc_id);
1159	if (ret != 0) {
1160		/* We return -1 here to signal that we don't
1161		 * know which pipe is associated with this crtc.
1162		 * This lets the caller know that this information
1163		 * isn't available; using the wrong pipe for
1164		 * vblank waiting can cause the chipset to lock up
1165		 */
1166		return -1;
1167	}
1168
1169	return get_pipe_from_crtc_id.pipe;
1170}
1171
1172static int
1173drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1174			     unsigned long size, void *data)
1175{
1176	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1177	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1178	struct drm_i915_gem_pread pread;
1179	int ret;
1180
1181	memset(&pread, 0, sizeof(pread));
1182	pread.handle = bo_gem->gem_handle;
1183	pread.offset = offset;
1184	pread.size = size;
1185	pread.data_ptr = (uint64_t) (uintptr_t) data;
1186	do {
1187		ret = ioctl(bufmgr_gem->fd,
1188			    DRM_IOCTL_I915_GEM_PREAD,
1189			    &pread);
1190	} while (ret == -1 && errno == EINTR);
1191	if (ret != 0) {
1192		ret = -errno;
1193		fprintf(stderr,
1194			"%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1195			__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1196			(int)size, strerror(errno));
1197	}
1198
1199	return ret;
1200}
1201
1202/** Waits for all GPU rendering to the object to have completed. */
1203static void
1204drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1205{
1206	drm_intel_gem_bo_start_gtt_access(bo, 0);
1207}
1208
1209/**
1210 * Sets the object to the GTT read and possibly write domain, used by the X
1211 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1212 *
1213 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1214 * can do tiled pixmaps this way.
1215 */
1216void
1217drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1218{
1219	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1220	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1221	struct drm_i915_gem_set_domain set_domain;
1222	int ret;
1223
1224	set_domain.handle = bo_gem->gem_handle;
1225	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1226	set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1227	do {
1228		ret = ioctl(bufmgr_gem->fd,
1229			    DRM_IOCTL_I915_GEM_SET_DOMAIN,
1230			    &set_domain);
1231	} while (ret == -1 && errno == EINTR);
1232	if (ret != 0) {
1233		fprintf(stderr,
1234			"%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1235			__FILE__, __LINE__, bo_gem->gem_handle,
1236			set_domain.read_domains, set_domain.write_domain,
1237			strerror(errno));
1238	}
1239}
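
/*
 * Hedged sketch of the 2D-driver pattern the comment above alludes to
 * (front_bo is hypothetical): pin the scanout buffer, then move it to the
 * GTT domain before the CPU or blitter writes to it.
 *
 *	drm_intel_bo_pin(front_bo, 4096);
 *	drm_intel_gem_bo_start_gtt_access(front_bo, 1);
 *
 * This only flushes caches and changes domains; any fence register needed
 * for tiled access remains the caller's responsibility, per the comment
 * above.
 */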
1240
1241static void
1242drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1243{
1244	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1245	int i;
1246
1247	free(bufmgr_gem->exec2_objects);
1248	free(bufmgr_gem->exec_objects);
1249	free(bufmgr_gem->exec_bos);
1250
1251	pthread_mutex_destroy(&bufmgr_gem->lock);
1252
1253	/* Free any cached buffer objects we were going to reuse */
1254	for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
1255		struct drm_intel_gem_bo_bucket *bucket =
1256		    &bufmgr_gem->cache_bucket[i];
1257		drm_intel_bo_gem *bo_gem;
1258
1259		while (!DRMLISTEMPTY(&bucket->head)) {
1260			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1261					      bucket->head.next, head);
1262			DRMLISTDEL(&bo_gem->head);
1263
1264			drm_intel_gem_bo_free(&bo_gem->bo);
1265		}
1266	}
1267
1268	free(bufmgr);
1269}
1270
1271/**
1272 * Adds the target buffer to the validation list and adds the relocation
1273 * to the reloc_buffer's relocation list.
1274 *
1275 * The relocation entry at the given offset must already contain the
1276 * precomputed relocation value, because the kernel will optimize out
1277 * the relocation entry write when the buffer hasn't moved from the
1278 * last known offset in target_bo.
1279 */
1280static int
1281do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1282		 drm_intel_bo *target_bo, uint32_t target_offset,
1283		 uint32_t read_domains, uint32_t write_domain,
1284		 int need_fence)
1285{
1286	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1287	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1288	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1289
1290	if (bo_gem->has_error)
1291		return -ENOMEM;
1292
1293	if (target_bo_gem->has_error) {
1294		bo_gem->has_error = 1;
1295		return -ENOMEM;
1296	}
1297
1298	if (target_bo_gem->tiling_mode == I915_TILING_NONE)
1299		need_fence = 0;
1300
1301	/* We never use HW fences for rendering on 965+ */
1302	if (bufmgr_gem->gen >= 4)
1303		need_fence = 0;
1304
1305	/* Create a new relocation list if needed */
1306	if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
1307		return -ENOMEM;
1308
1309	/* Check overflow */
1310	assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1311
1312	/* Check args */
1313	assert(offset <= bo->size - 4);
1314	assert((write_domain & (write_domain - 1)) == 0);
1315
1316	/* Make sure that we're not adding a reloc to something whose size has
1317	 * already been accounted for.
1318	 */
1319	assert(!bo_gem->used_as_reloc_target);
1320	bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1321	/* An object needing a fence is a tiled buffer, so it won't have
1322	 * relocs to other buffers.
1323	 */
1324	if (need_fence)
1325		target_bo_gem->reloc_tree_fences = 1;
1326	bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
1327
1328	/* Flag the target to disallow further relocations in it. */
1329	target_bo_gem->used_as_reloc_target = 1;
1330
1331	bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1332	bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1333	bo_gem->relocs[bo_gem->reloc_count].target_handle =
1334	    target_bo_gem->gem_handle;
1335	bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1336	bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1337	bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
1338
1339	bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
1340	drm_intel_gem_bo_reference(target_bo);
1341	if (need_fence)
1342		bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
1343			DRM_INTEL_RELOC_FENCE;
1344	else
1345		bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
1346
1347	bo_gem->reloc_count++;
1348
1349	return 0;
1350}
1351
1352static int
1353drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1354			    drm_intel_bo *target_bo, uint32_t target_offset,
1355			    uint32_t read_domains, uint32_t write_domain)
1356{
1357	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1358
1359	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1360				read_domains, write_domain,
1361				!bufmgr_gem->fenced_relocs);
1362}
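
/*
 * Relocation usage sketch (illustrative).  As noted above
 * do_bo_emit_reloc(), the batch dword must already hold the presumed target
 * address before the reloc is recorded:
 *
 *	uint32_t *batch = batch_bo->virtual;	// mapped batch buffer
 *	batch[n] = target_bo->offset + delta;	// precomputed value
 *	drm_intel_bo_emit_reloc(batch_bo, n * 4,
 *				target_bo, delta,
 *				I915_GEM_DOMAIN_RENDER,
 *				I915_GEM_DOMAIN_RENDER);
 *
 * If target_bo has not moved by execbuffer time, the kernel can then skip
 * rewriting that dword.
 */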
1363
1364static int
1365drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
1366				  drm_intel_bo *target_bo,
1367				  uint32_t target_offset,
1368				  uint32_t read_domains, uint32_t write_domain)
1369{
1370	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1371				read_domains, write_domain, 1);
1372}
1373
1374/**
1375 * Walk the tree of relocations rooted at BO and accumulate the list of
1376 * validations to be performed and update the relocation buffers with
1377 * index values into the validation list.
1378 */
1379static void
1380drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
1381{
1382	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1383	int i;
1384
1385	if (bo_gem->relocs == NULL)
1386		return;
1387
1388	for (i = 0; i < bo_gem->reloc_count; i++) {
1389		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1390
1391		/* Continue walking the tree depth-first. */
1392		drm_intel_gem_bo_process_reloc(target_bo);
1393
1394		/* Add the target to the validate list */
1395		drm_intel_add_validate_buffer(target_bo);
1396	}
1397}
1398
1399static void
1400drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
1401{
1402	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1403	int i;
1404
1405	if (bo_gem->relocs == NULL)
1406		return;
1407
1408	for (i = 0; i < bo_gem->reloc_count; i++) {
1409		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1410		int need_fence;
1411
1412		/* Continue walking the tree depth-first. */
1413		drm_intel_gem_bo_process_reloc2(target_bo);
1414
1415		need_fence = (bo_gem->reloc_target_info[i].flags &
1416			      DRM_INTEL_RELOC_FENCE);
1417
1418		/* Add the target to the validate list */
1419		drm_intel_add_validate_buffer2(target_bo, need_fence);
1420	}
1421}
1422
1423
1424static void
1425drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
1426{
1427	int i;
1428
1429	for (i = 0; i < bufmgr_gem->exec_count; i++) {
1430		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1431		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1432
1433		/* Update the buffer offset */
1434		if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
1435			DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1436			    bo_gem->gem_handle, bo_gem->name, bo->offset,
1437			    (unsigned long long)bufmgr_gem->exec_objects[i].
1438			    offset);
1439			bo->offset = bufmgr_gem->exec_objects[i].offset;
1440		}
1441	}
1442}
1443
1444static void
1445drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
1446{
1447	int i;
1448
1449	for (i = 0; i < bufmgr_gem->exec_count; i++) {
1450		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1451		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1452
1453		/* Update the buffer offset */
1454		if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
1455			DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1456			    bo_gem->gem_handle, bo_gem->name, bo->offset,
1457			    (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
1458			bo->offset = bufmgr_gem->exec2_objects[i].offset;
1459		}
1460	}
1461}
1462
1463static int
1464drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
1465		      drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
1466{
1467	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1468	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1469	struct drm_i915_gem_execbuffer execbuf;
1470	int ret, i;
1471
1472	if (bo_gem->has_error)
1473		return -ENOMEM;
1474
1475	pthread_mutex_lock(&bufmgr_gem->lock);
1476	/* Update indices and set up the validate list. */
1477	drm_intel_gem_bo_process_reloc(bo);
1478
1479	/* Add the batch buffer to the validation list.  There are no
1480	 * relocations pointing to it.
1481	 */
1482	drm_intel_add_validate_buffer(bo);
1483
1484	execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
1485	execbuf.buffer_count = bufmgr_gem->exec_count;
1486	execbuf.batch_start_offset = 0;
1487	execbuf.batch_len = used;
1488	execbuf.cliprects_ptr = (uintptr_t) cliprects;
1489	execbuf.num_cliprects = num_cliprects;
1490	execbuf.DR1 = 0;
1491	execbuf.DR4 = DR4;
1492
1493	do {
1494		ret = ioctl(bufmgr_gem->fd,
1495			    DRM_IOCTL_I915_GEM_EXECBUFFER,
1496			    &execbuf);
1497	} while (ret != 0 && errno == EINTR);
1498
1499	if (ret != 0) {
1500		ret = -errno;
1501		if (errno == ENOSPC) {
1502			fprintf(stderr,
1503				"Execbuffer fails to pin. "
1504				"Estimate: %u. Actual: %u. Available: %u\n",
1505				drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
1506								   bufmgr_gem->
1507								   exec_count),
1508				drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
1509								  bufmgr_gem->
1510								  exec_count),
1511				(unsigned int)bufmgr_gem->gtt_size);
1512		}
1513	}
1514	drm_intel_update_buffer_offsets(bufmgr_gem);
1515
1516	if (bufmgr_gem->bufmgr.debug)
1517		drm_intel_gem_dump_validation_list(bufmgr_gem);
1518
1519	for (i = 0; i < bufmgr_gem->exec_count; i++) {
1520		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1521		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1522
1523		/* Disconnect the buffer from the validate list */
1524		bo_gem->validate_index = -1;
1525		bufmgr_gem->exec_bos[i] = NULL;
1526	}
1527	bufmgr_gem->exec_count = 0;
1528	pthread_mutex_unlock(&bufmgr_gem->lock);
1529
1530	return ret;
1531}
1532
1533static int
1534drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
1535		       drm_clip_rect_t *cliprects, int num_cliprects,
1536		       int DR4)
1537{
1538	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
1539	struct drm_i915_gem_execbuffer2 execbuf;
1540	int ret, i;
1541
1542	pthread_mutex_lock(&bufmgr_gem->lock);
1543	/* Update indices and set up the validate list. */
1544	drm_intel_gem_bo_process_reloc2(bo);
1545
1546	/* Add the batch buffer to the validation list.  There are no relocations
1547	 * pointing to it.
1548	 */
1549	drm_intel_add_validate_buffer2(bo, 0);
1550
1551	execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
1552	execbuf.buffer_count = bufmgr_gem->exec_count;
1553	execbuf.batch_start_offset = 0;
1554	execbuf.batch_len = used;
1555	execbuf.cliprects_ptr = (uintptr_t)cliprects;
1556	execbuf.num_cliprects = num_cliprects;
1557	execbuf.DR1 = 0;
1558	execbuf.DR4 = DR4;
1559	execbuf.flags = 0;
1560	execbuf.rsvd1 = 0;
1561	execbuf.rsvd2 = 0;
1562
1563	do {
1564		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2,
1565			    &execbuf);
1566	} while (ret != 0 && errno == EINTR);
1567
1568	if (ret != 0) {
1569		ret = -errno;
1570		if (ret == -ENOMEM) {
1571			fprintf(stderr,
1572				"Execbuffer fails to pin. "
1573				"Estimate: %u. Actual: %u. Available: %u\n",
1574				drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
1575								   bufmgr_gem->exec_count),
1576				drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
1577								  bufmgr_gem->exec_count),
1578				(unsigned int) bufmgr_gem->gtt_size);
1579		}
1580	}
1581	drm_intel_update_buffer_offsets2(bufmgr_gem);
1582
1583	if (bufmgr_gem->bufmgr.debug)
1584		drm_intel_gem_dump_validation_list(bufmgr_gem);
1585
1586	for (i = 0; i < bufmgr_gem->exec_count; i++) {
1587		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1588		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
1589
1590		/* Disconnect the buffer from the validate list */
1591		bo_gem->validate_index = -1;
1592		bufmgr_gem->exec_bos[i] = NULL;
1593	}
1594	bufmgr_gem->exec_count = 0;
1595	pthread_mutex_unlock(&bufmgr_gem->lock);
1596
1597	return ret;
1598}
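
/*
 * Submission sketch (illustrative), completing the relocation example
 * above: once the batch contents and relocs are in place, a single exec
 * call validates every BO reachable from the batch and submits it
 * ("used_bytes" is the number of batch bytes actually filled in).
 *
 *	drm_intel_bo_unmap(batch_bo);
 *	drm_intel_bo_exec(batch_bo, used_bytes, NULL, 0, 0);
 *
 * Whether this lands in the exec or exec2 path above depends on which
 * bo_exec hook the bufmgr was initialized with.
 */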
1599
1600static int
1601drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
1602{
1603	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1604	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1605	struct drm_i915_gem_pin pin;
1606	int ret;
1607
1608	memset(&pin, 0, sizeof(pin));
1609	pin.handle = bo_gem->gem_handle;
1610	pin.alignment = alignment;
1611
1612	do {
1613		ret = ioctl(bufmgr_gem->fd,
1614			    DRM_IOCTL_I915_GEM_PIN,
1615			    &pin);
1616	} while (ret == -1 && errno == EINTR);
1617
1618	if (ret != 0)
1619		return -errno;
1620
1621	bo->offset = pin.offset;
1622	return 0;
1623}
1624
1625static int
1626drm_intel_gem_bo_unpin(drm_intel_bo *bo)
1627{
1628	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1629	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1630	struct drm_i915_gem_unpin unpin;
1631	int ret;
1632
1633	memset(&unpin, 0, sizeof(unpin));
1634	unpin.handle = bo_gem->gem_handle;
1635
1636	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
1637	if (ret != 0)
1638		return -errno;
1639
1640	return 0;
1641}
1642
1643static int
1644drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
1645			    uint32_t stride)
1646{
1647	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1648	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1649	struct drm_i915_gem_set_tiling set_tiling;
1650	int ret;
1651
1652	if (bo_gem->global_name == 0 && *tiling_mode == bo_gem->tiling_mode)
1653		return 0;
1654
1655	memset(&set_tiling, 0, sizeof(set_tiling));
1656	set_tiling.handle = bo_gem->gem_handle;
1657
1658	do {
1659		set_tiling.tiling_mode = *tiling_mode;
1660		set_tiling.stride = stride;
1661
1662		ret = ioctl(bufmgr_gem->fd,
1663			    DRM_IOCTL_I915_GEM_SET_TILING,
1664			    &set_tiling);
1665	} while (ret == -1 && errno == EINTR);
1666	bo_gem->tiling_mode = set_tiling.tiling_mode;
1667	bo_gem->swizzle_mode = set_tiling.swizzle_mode;
1668
1669	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
1670
1671	*tiling_mode = bo_gem->tiling_mode;
1672	return ret == 0 ? 0 : -errno;
1673}
1674
1675static int
1676drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
1677			    uint32_t * swizzle_mode)
1678{
1679	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1680
1681	*tiling_mode = bo_gem->tiling_mode;
1682	*swizzle_mode = bo_gem->swizzle_mode;
1683	return 0;
1684}
1685
1686static int
1687drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
1688{
1689	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1690	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1691	struct drm_gem_flink flink;
1692	int ret;
1693
1694	if (!bo_gem->global_name) {
1695		memset(&flink, 0, sizeof(flink));
1696		flink.handle = bo_gem->gem_handle;
1697
1698		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
1699		if (ret != 0)
1700			return -errno;
1701		bo_gem->global_name = flink.name;
1702		bo_gem->reusable = 0;
1703	}
1704
1705	*name = bo_gem->global_name;
1706	return 0;
1707}
1708
1709/**
1710 * Enables unlimited caching of buffer objects for reuse.
1711 *
1712 * This is potentially very memory expensive, as the cache at each bucket
1713 * size is only bounded by how many buffers of that size we've managed to have
1714 * in flight at once.
1715 */
1716void
1717drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
1718{
1719	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1720
1721	bufmgr_gem->bo_reuse = 1;
1722}
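
/*
 * Typical setup sketch (illustrative; the 16KB batch size is arbitrary):
 * create the bufmgr and opt in to BO reuse before any allocations.
 *
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *
 * With reuse enabled, freed buffers are parked in the power-of-two cache
 * buckets (madvised DONTNEED) instead of being closed immediately.
 */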
1723
1724/**
1725 * Enable use of fenced reloc type.
1726 *
1727 * New code should enable this to avoid unnecessary fence register
1728 * allocation.  If this option is not enabled, all relocs will have a fence
1729 * register allocated.
1730 */
1731void
1732drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
1733{
1734	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
1735
1736	if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
1737		bufmgr_gem->fenced_relocs = 1;
1738}
1739
1740/**
1741 * Return the additional aperture space required by the tree of buffer objects
1742 * rooted at bo.
1743 */
1744static int
1745drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
1746{
1747	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1748	int i;
1749	int total = 0;
1750
1751	if (bo == NULL || bo_gem->included_in_check_aperture)
1752		return 0;
1753
1754	total += bo->size;
1755	bo_gem->included_in_check_aperture = 1;
1756
1757	for (i = 0; i < bo_gem->reloc_count; i++)
1758		total +=
1759		    drm_intel_gem_bo_get_aperture_space(bo_gem->
1760							reloc_target_info[i].bo);
1761
1762	return total;
1763}
1764
1765/**
1766 * Count the number of buffers in this list that need a fence reg
1767 *
1768 * If the count is greater than the number of available regs, we'll have
1769 * to ask the caller to resubmit a batch with fewer tiled buffers.
1770 *
1771 * This function over-counts if the same buffer is used multiple times.
1772 */
1773static unsigned int
1774drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
1775{
1776	int i;
1777	unsigned int total = 0;
1778
1779	for (i = 0; i < count; i++) {
1780		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
1781
1782		if (bo_gem == NULL)
1783			continue;
1784
1785		total += bo_gem->reloc_tree_fences;
1786	}
1787	return total;
1788}
1789
1790/**
1791 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
1792 * for the next drm_intel_bufmgr_check_aperture_space() call.
1793 */
1794static void
1795drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
1796{
1797	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1798	int i;
1799
1800	if (bo == NULL || !bo_gem->included_in_check_aperture)
1801		return;
1802
1803	bo_gem->included_in_check_aperture = 0;
1804
1805	for (i = 0; i < bo_gem->reloc_count; i++)
1806		drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
1807							   reloc_target_info[i].bo);
1808}
1809
1810/**
1811 * Return a conservative estimate for the amount of aperture required
1812 * for a collection of buffers. This may double-count some buffers.
1813 */
1814static unsigned int
1815drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
1816{
1817	int i;
1818	unsigned int total = 0;
1819
1820	for (i = 0; i < count; i++) {
1821		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
1822		if (bo_gem != NULL)
1823			total += bo_gem->reloc_tree_size;
1824	}
1825	return total;
1826}
1827
1828/**
1829 * Return the amount of aperture needed for a collection of buffers.
1830 * This avoids double counting any buffers, at the cost of looking
1831 * at every buffer in the set.
1832 */
1833static unsigned int
1834drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
1835{
1836	int i;
1837	unsigned int total = 0;
1838
1839	for (i = 0; i < count; i++) {
1840		total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
1841		/* For the first buffer object in the array, we get an
1842		 * accurate count back for its reloc_tree size (since nothing
1843		 * had been flagged as being counted yet).  We can save that
1844		 * value out as a more conservative reloc_tree_size that
1845		 * avoids double-counting target buffers.  Since the first
1846		 * buffer happens to usually be the batch buffer in our
1847		 * callers, this can pull us back from doing the tree
1848		 * walk on every new batch emit.
1849		 */
1850		if (i == 0) {
1851			drm_intel_bo_gem *bo_gem =
1852			    (drm_intel_bo_gem *) bo_array[i];
1853			bo_gem->reloc_tree_size = total;
1854		}
1855	}
1856
1857	for (i = 0; i < count; i++)
1858		drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
1859	return total;
1860}
1861
1862/**
1863 * Return -1 if the batchbuffer should be flushed before attempting to
1864 * emit rendering referencing the buffers pointed to by bo_array.
1865 *
1866 * This is required because if we try to emit a batchbuffer with relocations
1867 * to a tree of buffers that won't simultaneously fit in the aperture,
1868 * the rendering will return an error at a point where the software is not
1869 * prepared to recover from it.
1870 *
1871 * However, we also want to emit the batchbuffer well before we reach the
1872 * limit: with a series of batchbuffers, each of which references buffers
1873 * covering almost all of the aperture, every emit ends up waiting to evict
1874 * a buffer from the previous rendering, and performance becomes effectively
1875 * synchronous.  By emitting smaller batchbuffers, we pay some CPU overhead
1876 * in exchange for better parallelism.
1877 */
1878static int
1879drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
1880{
1881	drm_intel_bufmgr_gem *bufmgr_gem =
1882	    (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
1883	unsigned int total = 0;
1884	unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
1885	int total_fences;
1886
1887	/* Check for fence reg constraints if necessary */
1888	if (bufmgr_gem->available_fences) {
1889		total_fences = drm_intel_gem_total_fences(bo_array, count);
1890		if (total_fences > bufmgr_gem->available_fences)
1891			return -ENOSPC;
1892	}
1893
1894	total = drm_intel_gem_estimate_batch_space(bo_array, count);
1895
1896	if (total > threshold)
1897		total = drm_intel_gem_compute_batch_space(bo_array, count);
1898
1899	if (total > threshold) {
1900		DBG("check_space: overflowed available aperture, "
1901		    "%dkb vs %dkb\n",
1902		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
1903		return -ENOSPC;
1904	} else {
1905		DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
1906		    (int)bufmgr_gem->gtt_size / 1024);
1907		return 0;
1908	}
1909}
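
/*
 * Usage sketch (illustrative only; flush_batch() and rebuild_batch() are
 * hypothetical caller-side helpers, not part of this library): callers reach
 * this via the public drm_intel_bufmgr_check_aperture_space() entry point,
 * typically before adding more state to the current batch:
 *
 *	if (drm_intel_bufmgr_check_aperture_space(bo_array, count) != 0) {
 *		flush_batch();		// submit what has accumulated so far
 *		rebuild_batch();	// re-emit with a smaller buffer set
 *	}
 */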
1910
1911/*
1912 * Disable buffer reuse for objects which are shared with the kernel
1913 * as scanout buffers
1914 */
1915static int
1916drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
1917{
1918	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1919
1920	bo_gem->reusable = 0;
1921	return 0;
1922}
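
/*
 * Usage sketch (illustrative, not from the original source; scanout_bo is a
 * hypothetical caller variable): a driver that turns a BO into a shared
 * scanout buffer would typically flink it and then take it out of the reuse
 * cache:
 *
 *	uint32_t name;
 *
 *	drm_intel_bo_flink(scanout_bo, &name);
 *	drm_intel_bo_disable_reuse(scanout_bo);
 *
 * Both calls are part of the public API declared in intel_bufmgr.h.
 */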
1923
1924static int
1925_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
1926{
1927	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1928	int i;
1929
1930	for (i = 0; i < bo_gem->reloc_count; i++) {
1931		if (bo_gem->reloc_target_info[i].bo == target_bo)
1932			return 1;
1933		if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
1934						target_bo))
1935			return 1;
1936	}
1937
1938	return 0;
1939}
1940
1941/** Return true if target_bo is referenced by bo's relocation tree. */
1942static int
1943drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
1944{
1945	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1946
1947	if (bo == NULL || target_bo == NULL)
1948		return 0;
1949	if (target_bo_gem->used_as_reloc_target)
1950		return _drm_intel_gem_bo_references(bo, target_bo);
1951	return 0;
1952}
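
/*
 * Usage sketch (illustrative; batch_bo, target and flush_batch() are
 * hypothetical caller-side names): the public drm_intel_bo_references()
 * lets a caller decide whether pending rendering must be flushed before the
 * CPU touches a buffer:
 *
 *	if (drm_intel_bo_references(batch_bo, target))
 *		flush_batch();
 *	drm_intel_bo_map(target, 1);
 */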
1953
1954/**
1955 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
1956 * and manage buffer objects.
1957 *
1958 * \param fd File descriptor of the opened DRM device.
 * \param batch_size Size in bytes of the batchbuffers that will be submitted,
 *	used to size the per-batch relocation lists.
1959 */
1960drm_intel_bufmgr *
1961drm_intel_bufmgr_gem_init(int fd, int batch_size)
1962{
1963	drm_intel_bufmgr_gem *bufmgr_gem;
1964	struct drm_i915_gem_get_aperture aperture;
1965	drm_i915_getparam_t gp;
1966	int ret, i;
1967	unsigned long size;
1968	int exec2 = 0;
1969
1970	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
1971	if (bufmgr_gem == NULL)
1972		return NULL;
1973
1974	bufmgr_gem->fd = fd;
1975
1976	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
1977		free(bufmgr_gem);
1978		return NULL;
1979	}
1980
1981	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
1982
1983	if (ret == 0)
1984		bufmgr_gem->gtt_size = aperture.aper_available_size;
1985	else {
1986		fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
1987			strerror(errno));
1988		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
1989		fprintf(stderr, "Assuming %dkB available aperture size.\n"
1990			"May lead to reduced performance or incorrect "
1991			"rendering.\n",
1992			(int)bufmgr_gem->gtt_size / 1024);
1993	}
1994
1995	gp.param = I915_PARAM_CHIPSET_ID;
1996	gp.value = &bufmgr_gem->pci_device;
1997	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1998	if (ret) {
1999		fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
2000		fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
2001	}
2002
2003	if (IS_GEN2(bufmgr_gem))
2004		bufmgr_gem->gen = 2;
2005	else if (IS_GEN3(bufmgr_gem))
2006		bufmgr_gem->gen = 3;
2007	else if (IS_GEN4(bufmgr_gem))
2008		bufmgr_gem->gen = 4;
2009	else
2010		bufmgr_gem->gen = 6;
2011
2012	gp.param = I915_PARAM_HAS_EXECBUF2;
	/* gp.value still points at pci_device from the chipset-id query above,
	 * and the kernel writes the queried value back through it; re-point it
	 * so this query doesn't clobber pci_device.
	 */
	gp.value = &exec2;
2013	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2014	if (!ret)
2015		exec2 = 1;
2016
2017	if (bufmgr_gem->gen < 4) {
2018		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
2019		gp.value = &bufmgr_gem->available_fences;
2020		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2021		if (ret) {
2022			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
2023				errno);
2024			fprintf(stderr, "param: %d, val: %d\n", gp.param,
2025				*gp.value);
2026			bufmgr_gem->available_fences = 0;
2027		} else {
2028			/* XXX The kernel reports the total number of fences,
2029			 * including any that may be pinned.
2030			 *
2031			 * We presume that there will be at least one pinned
2032			 * fence for the scanout buffer, but there may be more
2033			 * than one scanout and the user may be manually
2034			 * pinning buffers. Let's move to execbuffer2 and
2035			 * thereby forget the insanity of using fences...
2036			 */
2037			bufmgr_gem->available_fences -= 2;
2038			if (bufmgr_gem->available_fences < 0)
2039				bufmgr_gem->available_fences = 0;
2040		}
2041	}
2042
2043	/* Let's go with one relocation for every 2 dwords (but round down a bit
2044	 * since a power of two will mean an extra page allocation for the reloc
2045	 * buffer).
2046	 *
2047	 * Every 4 was too few for the blender benchmark.
2048	 */
2049	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
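	/* Illustrative arithmetic (not from the original source): with a
	 * 16 KiB batch, batch_size / 4 gives 4096 dwords, so max_relocs is
	 * 4096 / 2 - 2 = 2046 relocation entries.
	 */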
2050
2051	bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
2052	bufmgr_gem->bufmgr.bo_alloc_for_render =
2053	    drm_intel_gem_bo_alloc_for_render;
2054	bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
2055	bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
2056	bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
2057	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
2058	bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
2059	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
2060	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
2061	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
2062	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
2063	bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
2064	bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
2065	bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
2066	bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
2067	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
2068	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
2069	/* Use the new one if available */
2070	if (exec2)
2071		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
2072	else
2073		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
2074	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
2075	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
2076	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
2077	bufmgr_gem->bufmgr.debug = 0;
2078	bufmgr_gem->bufmgr.check_aperture_space =
2079	    drm_intel_gem_check_aperture_space;
2080	bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
2081	bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
2082	    drm_intel_gem_get_pipe_from_crtc_id;
2083	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
2084
2085	/* Initialize the linked lists for BO reuse cache. */
2086	for (i = 0, size = 4096; i < DRM_INTEL_GEM_BO_BUCKETS; i++, size *= 2) {
2087		DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
2088		bufmgr_gem->cache_bucket[i].size = size;
2089	}
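	/* Bucket sizes double from 4096 bytes, so the largest bucket holds
	 * objects up to 4096 << (DRM_INTEL_GEM_BO_BUCKETS - 1) bytes
	 * (32 MiB for 14 buckets); objects bigger than that are simply not
	 * put back in the reuse cache.
	 */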
2090
2091	return &bufmgr_gem->bufmgr;
2092}
2093
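/*
 * Usage sketch (illustrative only; the device path, batch size and error
 * handling are assumptions, not taken from this file): a client typically
 * opens the DRM device, creates the bufmgr, and enables BO reuse before
 * allocating buffers:
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *
 *	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 4096);
 *	...
 *	drm_intel_bo_unreference(bo);
 *	drm_intel_bufmgr_destroy(bufmgr);
 *
 * All of the drm_intel_* calls above are part of the public libdrm_intel API
 * declared in intel_bufmgr.h.
 */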