kgem.c revision 813957e3
103b705cfSriastradh/*
203b705cfSriastradh * Copyright (c) 2011 Intel Corporation
303b705cfSriastradh *
403b705cfSriastradh * Permission is hereby granted, free of charge, to any person obtaining a
503b705cfSriastradh * copy of this software and associated documentation files (the "Software"),
603b705cfSriastradh * to deal in the Software without restriction, including without limitation
703b705cfSriastradh * the rights to use, copy, modify, merge, publish, distribute, sublicense,
803b705cfSriastradh * and/or sell copies of the Software, and to permit persons to whom the
903b705cfSriastradh * Software is furnished to do so, subject to the following conditions:
1003b705cfSriastradh *
1103b705cfSriastradh * The above copyright notice and this permission notice (including the next
1203b705cfSriastradh * paragraph) shall be included in all copies or substantial portions of the
1303b705cfSriastradh * Software.
1403b705cfSriastradh *
1503b705cfSriastradh * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1603b705cfSriastradh * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1703b705cfSriastradh * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
1803b705cfSriastradh * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
1903b705cfSriastradh * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
2003b705cfSriastradh * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
2103b705cfSriastradh * SOFTWARE.
2203b705cfSriastradh *
2303b705cfSriastradh * Authors:
2403b705cfSriastradh *    Chris Wilson <chris@chris-wilson.co.uk>
2503b705cfSriastradh *
2603b705cfSriastradh */
2703b705cfSriastradh
2803b705cfSriastradh#ifdef HAVE_CONFIG_H
2903b705cfSriastradh#include "config.h"
3003b705cfSriastradh#endif
3103b705cfSriastradh
3203b705cfSriastradh#include "sna.h"
3303b705cfSriastradh#include "sna_reg.h"
3403b705cfSriastradh
3503b705cfSriastradh#include <unistd.h>
3603b705cfSriastradh#include <sys/ioctl.h>
3703b705cfSriastradh#include <sys/mman.h>
389a906b70Schristos#include <sys/stat.h>
3903b705cfSriastradh#include <time.h>
409a906b70Schristos#include <sched.h>
4103b705cfSriastradh#include <errno.h>
4203b705cfSriastradh#include <fcntl.h>
4303b705cfSriastradh
4403b705cfSriastradh#include <xf86drm.h>
4503b705cfSriastradh
4603b705cfSriastradh#ifdef HAVE_VALGRIND
4703b705cfSriastradh#include <valgrind.h>
4803b705cfSriastradh#include <memcheck.h>
4903b705cfSriastradh#endif
5003b705cfSriastradh
5103b705cfSriastradh#ifdef HAVE_STRUCT_SYSINFO_TOTALRAM
5203b705cfSriastradh#include <sys/sysinfo.h>
5303b705cfSriastradh#endif
5403b705cfSriastradh
5503b705cfSriastradh#include "sna_cpuid.h"
5603b705cfSriastradh
5703b705cfSriastradhstatic struct kgem_bo *
5803b705cfSriastradhsearch_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
5903b705cfSriastradh
6003b705cfSriastradhstatic struct kgem_bo *
6103b705cfSriastradhsearch_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
6203b705cfSriastradh
6303b705cfSriastradh#define DBG_NO_HW 0
649a906b70Schristos#define DBG_NO_EXEC 0
6503b705cfSriastradh#define DBG_NO_TILING 0
6603b705cfSriastradh#define DBG_NO_CACHE 0
679a906b70Schristos#define DBG_NO_SNOOP_CACHE 0
6803b705cfSriastradh#define DBG_NO_CACHE_LEVEL 0
6903b705cfSriastradh#define DBG_NO_CPU 0
7003b705cfSriastradh#define DBG_NO_CREATE2 0
7103b705cfSriastradh#define DBG_NO_USERPTR 0
7203b705cfSriastradh#define DBG_NO_UNSYNCHRONIZED_USERPTR 0
7303b705cfSriastradh#define DBG_NO_LLC 0
7403b705cfSriastradh#define DBG_NO_SEMAPHORES 0
7503b705cfSriastradh#define DBG_NO_MADV 0
7603b705cfSriastradh#define DBG_NO_UPLOAD_CACHE 0
7703b705cfSriastradh#define DBG_NO_UPLOAD_ACTIVE 0
7803b705cfSriastradh#define DBG_NO_MAP_UPLOAD 0
7903b705cfSriastradh#define DBG_NO_RELAXED_FENCING 0
8003b705cfSriastradh#define DBG_NO_SECURE_BATCHES 0
8103b705cfSriastradh#define DBG_NO_PINNED_BATCHES 0
82813957e3Ssnj#define DBG_NO_SHRINK_BATCHES 0
8303b705cfSriastradh#define DBG_NO_FAST_RELOC 0
8403b705cfSriastradh#define DBG_NO_HANDLE_LUT 0
8503b705cfSriastradh#define DBG_NO_WT 0
86813957e3Ssnj#define DBG_NO_WC_MMAP 0
8703b705cfSriastradh#define DBG_DUMP 0
889a906b70Schristos#define DBG_NO_MALLOC_CACHE 0
8903b705cfSriastradh
9003b705cfSriastradh#define FORCE_MMAP_SYNC 0 /* ((1 << DOMAIN_CPU) | (1 << DOMAIN_GTT)) */
9103b705cfSriastradh
9203b705cfSriastradh#ifndef DEBUG_SYNC
9303b705cfSriastradh#define DEBUG_SYNC 0
9403b705cfSriastradh#endif
9503b705cfSriastradh
969a906b70Schristos#define SHOW_BATCH_BEFORE 0
979a906b70Schristos#define SHOW_BATCH_AFTER 0
9803b705cfSriastradh
99813957e3Ssnj#if !USE_WC_MMAP
100813957e3Ssnj#undef DBG_NO_WC_MMAP
101813957e3Ssnj#define DBG_NO_WC_MMAP 1
102813957e3Ssnj#endif
103813957e3Ssnj
10403b705cfSriastradh#if 0
10503b705cfSriastradh#define ASSERT_IDLE(kgem__, handle__) assert(!__kgem_busy(kgem__, handle__))
10603b705cfSriastradh#define ASSERT_MAYBE_IDLE(kgem__, handle__, expect__) assert(!(expect__) || !__kgem_busy(kgem__, handle__))
10703b705cfSriastradh#else
10803b705cfSriastradh#define ASSERT_IDLE(kgem__, handle__)
10903b705cfSriastradh#define ASSERT_MAYBE_IDLE(kgem__, handle__, expect__)
11003b705cfSriastradh#endif
11103b705cfSriastradh
11203b705cfSriastradh/* Worst case seems to be 965gm where we cannot write within a cacheline that
11303b705cfSriastradh * is being simultaneously being read by the GPU, or within the sampler
11403b705cfSriastradh * prefetch. In general, the chipsets seem to have a requirement that sampler
11503b705cfSriastradh * offsets be aligned to a cacheline (64 bytes).
1169a906b70Schristos *
1179a906b70Schristos * Actually, it turns out the BLT color pattern (BR15) has the most severe
1189a906b70Schristos * alignment restrictions, 64 bytes for 8-bpp, 128 bytes for 16-bpp and 256
1199a906b70Schristos * bytes for 32-bpp.
12003b705cfSriastradh */
1219a906b70Schristos#define UPLOAD_ALIGNMENT 256
12203b705cfSriastradh
12303b705cfSriastradh#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)
12403b705cfSriastradh#define NUM_PAGES(x) (((x) + PAGE_SIZE-1) / PAGE_SIZE)
12503b705cfSriastradh
12603b705cfSriastradh#define MAX_GTT_VMA_CACHE 512
12703b705cfSriastradh#define MAX_CPU_VMA_CACHE INT16_MAX
12803b705cfSriastradh#define MAP_PRESERVE_TIME 10
12903b705cfSriastradh
1309a906b70Schristos#define MAKE_USER_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1))
1319a906b70Schristos#define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 1)
13203b705cfSriastradh
13303b705cfSriastradh#define LOCAL_I915_PARAM_HAS_BLT		11
13403b705cfSriastradh#define LOCAL_I915_PARAM_HAS_RELAXED_FENCING	12
13503b705cfSriastradh#define LOCAL_I915_PARAM_HAS_RELAXED_DELTA	15
136813957e3Ssnj#define LOCAL_I915_PARAM_HAS_LLC		17
13703b705cfSriastradh#define LOCAL_I915_PARAM_HAS_SEMAPHORES		20
13803b705cfSriastradh#define LOCAL_I915_PARAM_HAS_SECURE_BATCHES	23
13903b705cfSriastradh#define LOCAL_I915_PARAM_HAS_PINNED_BATCHES	24
14003b705cfSriastradh#define LOCAL_I915_PARAM_HAS_NO_RELOC		25
14103b705cfSriastradh#define LOCAL_I915_PARAM_HAS_HANDLE_LUT		26
14203b705cfSriastradh#define LOCAL_I915_PARAM_HAS_WT			27
143813957e3Ssnj#define LOCAL_I915_PARAM_MMAP_VERSION		30
14403b705cfSriastradh
14503b705cfSriastradh#define LOCAL_I915_EXEC_IS_PINNED		(1<<10)
14603b705cfSriastradh#define LOCAL_I915_EXEC_NO_RELOC		(1<<11)
14703b705cfSriastradh#define LOCAL_I915_EXEC_HANDLE_LUT		(1<<12)
14803b705cfSriastradh
1499a906b70Schristos#define LOCAL_I915_GEM_CREATE2       0x34
15003b705cfSriastradh#define LOCAL_IOCTL_I915_GEM_CREATE2 DRM_IOWR (DRM_COMMAND_BASE + LOCAL_I915_GEM_CREATE2, struct local_i915_gem_create2)
/* Argument block for the (local) I915_GEM_CREATE2 ioctl: like GEM_CREATE
 * but lets the caller choose placement, caching and tiling at creation
 * time.  Mirrors the kernel layout; "local" because the definition may
 * predate the installed kernel headers.
 */
struct local_i915_gem_create2 {
	uint64_t size;
	uint32_t placement;
#define LOCAL_I915_CREATE_PLACEMENT_SYSTEM 0
#define LOCAL_I915_CREATE_PLACEMENT_STOLEN 1 /* Cannot use CPU mmaps or pread/pwrite */
	uint32_t domain;
	uint32_t caching;
	uint32_t tiling_mode;
	uint32_t stride;
	uint32_t flags;
	uint32_t pad;
	uint32_t handle;	/* out: new object handle */
};
16403b705cfSriastradh
16503b705cfSriastradh#define LOCAL_I915_GEM_USERPTR       0x33
16603b705cfSriastradh#define LOCAL_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + LOCAL_I915_GEM_USERPTR, struct local_i915_gem_userptr)
/* Argument block for the (local) I915_GEM_USERPTR ioctl: wraps an
 * existing user allocation as a GEM object. */
struct local_i915_gem_userptr {
	uint64_t user_ptr;	/* page-aligned user address */
	uint64_t user_size;
	uint32_t flags;
#define I915_USERPTR_READ_ONLY		0x1
#define I915_USERPTR_UNSYNCHRONIZED	0x80000000
	uint32_t handle;	/* out: new object handle */
};
17503b705cfSriastradh
17603b705cfSriastradh#define UNCACHED	0
17703b705cfSriastradh#define SNOOPED		1
17803b705cfSriastradh#define DISPLAY		2
17903b705cfSriastradh
/* Argument block shared by the SET_CACHING/GET_CACHING ioctls; caching
 * takes one of UNCACHED/SNOOPED/DISPLAY above. */
struct local_i915_gem_caching {
	uint32_t handle;
	uint32_t caching;
};
18403b705cfSriastradh
18503b705cfSriastradh#define LOCAL_I915_GEM_SET_CACHING	0x2f
1869a906b70Schristos#define LOCAL_I915_GEM_GET_CACHING	0x30
18703b705cfSriastradh#define LOCAL_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + LOCAL_I915_GEM_SET_CACHING, struct local_i915_gem_caching)
1889a906b70Schristos#define LOCAL_IOCTL_I915_GEM_GET_CACHING DRM_IOW(DRM_COMMAND_BASE + LOCAL_I915_GEM_GET_CACHING, struct local_i915_gem_caching)
18903b705cfSriastradh
/* Argument block for version 2 of the GEM_MMAP ioctl, which adds a flags
 * word so the mapping can be requested write-combining (I915_MMAP_WC). */
struct local_i915_gem_mmap2 {
	uint32_t handle;
	uint32_t pad;
	uint64_t offset;	/* offset into the object to map */
	uint64_t size;
	uint64_t addr_ptr;	/* out: user address of the mapping */
	uint64_t flags;
#define I915_MMAP_WC 0x1
};
/* Same ioctl number as DRM_I915_GEM_MMAP, but with the extended struct. */
#define LOCAL_IOCTL_I915_GEM_MMAP_v2 DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct local_i915_gem_mmap2)
200813957e3Ssnj
/* An upload buffer: a kgem_bo plus a CPU-visible staging area.  Data is
 * accumulated into mem/used and flushed to the GPU object when needed. */
struct kgem_buffer {
	struct kgem_bo base;
	void *mem;		/* CPU staging pointer (mmap or malloc) */
	uint32_t used;		/* bytes of mem consumed so far */
	uint32_t need_io : 1;	/* mem is a shadow copy requiring pwrite/pread */
	uint32_t write : 2;
	uint32_t mmapped : 2;	/* one of the MMAPPED_* values below */
};
enum {
	MMAPPED_NONE,
	MMAPPED_GTT,
	MMAPPED_CPU
};
21403b705cfSriastradh
/* Single-entry caches of the most recently freed wrapper structs,
 * reused by the allocators elsewhere in this file (presumably to avoid
 * malloc/free churn on the hot path). */
static struct kgem_bo *__kgem_freed_bo;
static struct kgem_request *__kgem_freed_request;
/* Placeholder exec-object; its use is outside this chunk. */
static struct drm_i915_gem_exec_object2 _kgem_dummy_exec;
21803b705cfSriastradh
/* Convenience wrapper: size of @bo in bytes. */
static inline int bytes(struct kgem_bo *bo)
{
	return __kgem_bo_size(bo);
}
22303b705cfSriastradh
22403b705cfSriastradh#define bucket(B) (B)->size.pages.bucket
22503b705cfSriastradh#define num_pages(B) (B)->size.pages.count
22603b705cfSriastradh
/* Issue an ioctl, transparently restarting after signal interruption
 * (EINTR) and yielding the CPU before retrying when the kernel is
 * temporarily busy (EAGAIN).
 *
 * Returns 0 on success, or the negated errno on any other failure.
 */
static int do_ioctl(int fd, unsigned long req, void *arg)
{
	for (;;) {
		int err;

		if (ioctl(fd, req, arg) == 0)
			return 0;

		err = errno;
		if (err == EINTR)
			continue;

		if (err == EAGAIN) {
			sched_yield();
			continue;
		}

		return -err;
	}
}
2479a906b70Schristos
#ifdef DEBUG_MEMORY
/* Accounting hooks: track the number and total size of bo allocations
 * when compiled with DEBUG_MEMORY; otherwise they compile away. */
static void debug_alloc(struct kgem *kgem, size_t size)
{
	kgem->debug_memory.bo_allocs++;
	kgem->debug_memory.bo_bytes += size;
}
static void debug_alloc__bo(struct kgem *kgem, struct kgem_bo *bo)
{
	debug_alloc(kgem, bytes(bo));
}
#else
#define debug_alloc__bo(k, b)
#endif
26103b705cfSriastradh
#ifndef NDEBUG
/* Debug-only consistency check: ask the kernel for the object's tiling
 * mode and verify it matches our cached bo->tiling. */
static void assert_tiling(struct kgem *kgem, struct kgem_bo *bo)
{
	struct drm_i915_gem_get_tiling tiling;

	assert(bo);

	VG_CLEAR(tiling);
	tiling.handle = bo->handle;
	tiling.tiling_mode = bo->tiling;
	(void)do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &tiling);
	assert(tiling.tiling_mode == bo->tiling);
}

/* Debug-only consistency check: verify the object's caching level is
 * what we expect for this chipset (SNOOPED when we have an LLC,
 * UNCACHED otherwise). */
static void assert_cacheing(struct kgem *kgem, struct kgem_bo *bo)
{
	struct local_i915_gem_caching arg;
	int expect = kgem->has_llc ? SNOOPED : UNCACHED;

	VG_CLEAR(arg);
	arg.handle = bo->handle;
	arg.caching = expect;

	(void)do_ioctl(kgem->fd, LOCAL_IOCTL_I915_GEM_GET_CACHING, &arg);

	assert(arg.caching == expect);
}

/* Debug-only check that a bo is fully retired: still referenced, but no
 * longer part of any request or execbuffer. */
static void assert_bo_retired(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->refcnt);
	assert(bo->rq == NULL);
	assert(bo->exec == NULL);
	assert(list_is_empty(&bo->request));
}
#else
#define assert_tiling(kgem, bo)
#define assert_cacheing(kgem, bo)
#define assert_bo_retired(bo)
#endif
30403b705cfSriastradh
305813957e3Ssnjstatic void
306813957e3Ssnj__kgem_set_wedged(struct kgem *kgem)
307813957e3Ssnj{
308813957e3Ssnj	kgem->wedged = true;
309813957e3Ssnj	sna_render_mark_wedged(container_of(kgem, struct sna, kgem));
310813957e3Ssnj}
311813957e3Ssnj
31203b705cfSriastradhstatic void kgem_sna_reset(struct kgem *kgem)
31303b705cfSriastradh{
31403b705cfSriastradh	struct sna *sna = container_of(kgem, struct sna, kgem);
31503b705cfSriastradh
31603b705cfSriastradh	sna->render.reset(sna);
31703b705cfSriastradh	sna->blt_state.fill_bo = 0;
31803b705cfSriastradh}
31903b705cfSriastradh
32003b705cfSriastradhstatic void kgem_sna_flush(struct kgem *kgem)
32103b705cfSriastradh{
32203b705cfSriastradh	struct sna *sna = container_of(kgem, struct sna, kgem);
32303b705cfSriastradh
32403b705cfSriastradh	sna->render.flush(sna);
32503b705cfSriastradh
32603b705cfSriastradh	if (sna->render.solid_cache.dirty)
32703b705cfSriastradh		sna_render_flush_solid(sna);
32803b705cfSriastradh}
32903b705cfSriastradh
/* Request a tiling mode (and fence stride) for a GEM object.
 *
 * The EINTR/EAGAIN restart logic mirrors do_ioctl(), but is open-coded
 * here: the argument block is re-initialised before every retry,
 * presumably because the SET_TILING ioctl may rewrite it on return —
 * do not collapse this into a plain do_ioctl() call.
 *
 * Returns true if the kernel accepted the tiling change.
 */
static bool gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
{
	struct drm_i915_gem_set_tiling set_tiling;
	int err;

	if (DBG_NO_TILING)
		return false;

	VG_CLEAR(set_tiling);
restart:
	/* Reload the inputs on each restart (see note above). */
	set_tiling.handle = handle;
	set_tiling.tiling_mode = tiling;
	set_tiling.stride = stride;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling) == 0)
		return true;

	err = errno;
	if (err == EINTR)
		goto restart;

	if (err == EAGAIN) {
		/* Kernel busy: yield and try again. */
		sched_yield();
		goto restart;
	}

	return false;
}
35803b705cfSriastradh
35903b705cfSriastradhstatic bool gem_set_caching(int fd, uint32_t handle, int caching)
36003b705cfSriastradh{
36103b705cfSriastradh	struct local_i915_gem_caching arg;
36203b705cfSriastradh
36303b705cfSriastradh	VG_CLEAR(arg);
36403b705cfSriastradh	arg.handle = handle;
36503b705cfSriastradh	arg.caching = caching;
3669a906b70Schristos	return do_ioctl(fd, LOCAL_IOCTL_I915_GEM_SET_CACHING, &arg) == 0;
36703b705cfSriastradh}
36803b705cfSriastradh
36903b705cfSriastradhstatic uint32_t gem_userptr(int fd, void *ptr, int size, int read_only)
37003b705cfSriastradh{
37103b705cfSriastradh	struct local_i915_gem_userptr arg;
37203b705cfSriastradh
37303b705cfSriastradh	VG_CLEAR(arg);
37403b705cfSriastradh	arg.user_ptr = (uintptr_t)ptr;
37503b705cfSriastradh	arg.user_size = size;
37603b705cfSriastradh	arg.flags = I915_USERPTR_UNSYNCHRONIZED;
37703b705cfSriastradh	if (read_only)
37803b705cfSriastradh		arg.flags |= I915_USERPTR_READ_ONLY;
37903b705cfSriastradh
38003b705cfSriastradh	if (DBG_NO_UNSYNCHRONIZED_USERPTR ||
3819a906b70Schristos	    do_ioctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &arg)) {
38203b705cfSriastradh		arg.flags &= ~I915_USERPTR_UNSYNCHRONIZED;
3839a906b70Schristos		if (do_ioctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &arg)) {
38403b705cfSriastradh			DBG(("%s: failed to map %p + %d bytes: %d\n",
38503b705cfSriastradh			     __FUNCTION__, ptr, size, errno));
38603b705cfSriastradh			return 0;
38703b705cfSriastradh		}
38803b705cfSriastradh	}
38903b705cfSriastradh
39003b705cfSriastradh	return arg.handle;
39103b705cfSriastradh}
39203b705cfSriastradh
3939a906b70Schristosstatic bool __kgem_throttle(struct kgem *kgem, bool harder)
39403b705cfSriastradh{
3959a906b70Schristos	/* Let this be woken up by sigtimer so that we don't block here
3969a906b70Schristos	 * too much and completely starve X. We will sleep again shortly,
3979a906b70Schristos	 * and so catch up or detect the hang.
3989a906b70Schristos	 */
3999a906b70Schristos	do {
4009a906b70Schristos		if (ioctl(kgem->fd, DRM_IOCTL_I915_GEM_THROTTLE) == 0) {
4019a906b70Schristos			kgem->need_throttle = 0;
4029a906b70Schristos			return false;
4039a906b70Schristos		}
40403b705cfSriastradh
4059a906b70Schristos		if (errno == EIO)
4069a906b70Schristos			return true;
4079a906b70Schristos	} while (harder);
4089a906b70Schristos
4099a906b70Schristos	return false;
4109a906b70Schristos}
4119a906b70Schristos
4129a906b70Schristosstatic bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
4139a906b70Schristos{
4149a906b70Schristos	if (flags & CREATE_NO_RETIRE || !kgem->need_retire) {
4159a906b70Schristos		DBG(("%s: not retiring\n", __FUNCTION__));
41603b705cfSriastradh		return false;
41703b705cfSriastradh	}
41803b705cfSriastradh
41903b705cfSriastradh	if (kgem_retire(kgem))
42003b705cfSriastradh		return true;
42103b705cfSriastradh
42203b705cfSriastradh	if (flags & CREATE_NO_THROTTLE || !kgem->need_throttle) {
42303b705cfSriastradh		DBG(("%s: not throttling\n", __FUNCTION__));
42403b705cfSriastradh		return false;
42503b705cfSriastradh	}
42603b705cfSriastradh
4279a906b70Schristos	__kgem_throttle(kgem, false);
42803b705cfSriastradh	return kgem_retire(kgem);
42903b705cfSriastradh}
43003b705cfSriastradh
/* Map @bo through the GTT (aperture) and cache the mapping in
 * bo->map__gtt.
 *
 * Both the MMAP_GTT ioctl and the subsequent mmap are retried after
 * trimming our caches, on the assumption that failure means the kernel
 * is short of space.  Returns the mapping, or NULL on failure.
 *
 * NOTE(review): the NetBSD and generic paths splice into the same `if`
 * via #ifdef, and their err conventions differ (positive errno vs
 * negated return) — keep the structure intact when editing.
 */
static void *__kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
{
	struct drm_i915_gem_mmap_gtt gtt;
	void *ptr;
	int err;

	DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__,
	     bo->handle, bytes(bo)));

	VG_CLEAR(gtt);
retry_gtt:
	gtt.handle = bo->handle;
#ifdef __NetBSD__
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &gtt)) {
		err = errno;
#else
	if ((err = do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &gtt))) {
#endif
		assert(err != EINVAL);

		/* Likely out of memory: reclaim and retry. */
		(void)__kgem_throttle_retire(kgem, 0);
		if (kgem_expire_cache(kgem))
			goto retry_gtt;

		if (kgem_cleanup_cache(kgem))
			goto retry_gtt;

		ERR(("%s: failed to retrieve GTT offset for handle=%d: %d\n",
		     __FUNCTION__, bo->handle, -err));
		return NULL;
	}

retry_mmap:
#ifdef __NetBSD__
	err = -drmMap(kgem->fd, gtt.offset, bytes(bo), &ptr);
	if (err) {
#else
	ptr = mmap(0, bytes(bo), PROT_READ | PROT_WRITE, MAP_SHARED,
		   kgem->fd, gtt.offset);
	if (ptr == MAP_FAILED) {
		err = errno;
#endif
		assert(err != EINVAL);

		if (__kgem_throttle_retire(kgem, 0))
			goto retry_mmap;

		if (kgem_cleanup_cache(kgem))
			goto retry_mmap;

		ERR(("%s: failed to mmap handle=%d, %d bytes, into GTT domain: %d\n",
		     __FUNCTION__, bo->handle, bytes(bo), err));
		ptr = NULL;
	}

	/* Cache this mapping to avoid the overhead of an
	 * excruciatingly slow GTT pagefault. This is more an
	 * issue with compositing managers which need to
	 * frequently flush CPU damage to their GPU bo.
	 */
	return bo->map__gtt = ptr;
}
493813957e3Ssnj
494813957e3Ssnjstatic void *__kgem_bo_map__wc(struct kgem *kgem, struct kgem_bo *bo)
495813957e3Ssnj{
496813957e3Ssnj	struct local_i915_gem_mmap2 wc;
497813957e3Ssnj	int err;
498813957e3Ssnj
499813957e3Ssnj	DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__,
500813957e3Ssnj	     bo->handle, bytes(bo)));
501813957e3Ssnj	assert(kgem->has_wc_mmap);
502813957e3Ssnj
503813957e3Ssnj	VG_CLEAR(wc);
504813957e3Ssnj
505813957e3Ssnjretry_wc:
506813957e3Ssnj	wc.handle = bo->handle;
507813957e3Ssnj	wc.offset = 0;
508813957e3Ssnj	wc.size = bytes(bo);
509813957e3Ssnj	wc.flags = I915_MMAP_WC;
510813957e3Ssnj	if ((err = do_ioctl(kgem->fd, LOCAL_IOCTL_I915_GEM_MMAP_v2, &wc))) {
511813957e3Ssnj		assert(err != EINVAL);
512813957e3Ssnj
513813957e3Ssnj		if (__kgem_throttle_retire(kgem, 0))
514813957e3Ssnj			goto retry_wc;
515813957e3Ssnj
516813957e3Ssnj		if (kgem_cleanup_cache(kgem))
517813957e3Ssnj			goto retry_wc;
518813957e3Ssnj
519813957e3Ssnj		ERR(("%s: failed to mmap handle=%d, %d bytes, into CPU(wc) domain: %d\n",
520813957e3Ssnj		     __FUNCTION__, bo->handle, bytes(bo), -err));
521813957e3Ssnj		return NULL;
522813957e3Ssnj	}
523813957e3Ssnj
524813957e3Ssnj	VG(VALGRIND_MAKE_MEM_DEFINED(wc.addr_ptr, bytes(bo)));
525813957e3Ssnj
526813957e3Ssnj	DBG(("%s: caching CPU(wc) vma for %d\n", __FUNCTION__, bo->handle));
527813957e3Ssnj	return bo->map__wc = (void *)(uintptr_t)wc.addr_ptr;
528813957e3Ssnj}
529813957e3Ssnj
530813957e3Ssnjstatic void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
531813957e3Ssnj{
532813957e3Ssnj	struct drm_i915_gem_mmap mmap_arg;
533813957e3Ssnj	int err;
534813957e3Ssnj
535813957e3Ssnjretry:
536813957e3Ssnj	VG_CLEAR(mmap_arg);
537813957e3Ssnj	mmap_arg.handle = bo->handle;
538813957e3Ssnj	mmap_arg.offset = 0;
539813957e3Ssnj	mmap_arg.size = bytes(bo);
540813957e3Ssnj	if ((err = do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg))) {
541813957e3Ssnj		assert(err != EINVAL);
542813957e3Ssnj
543813957e3Ssnj		if (__kgem_throttle_retire(kgem, 0))
544813957e3Ssnj			goto retry;
545813957e3Ssnj
546813957e3Ssnj		if (kgem_cleanup_cache(kgem))
547813957e3Ssnj			goto retry;
548813957e3Ssnj
549813957e3Ssnj		ERR(("%s: failed to mmap handle=%d, %d bytes, into CPU domain: %d\n",
550813957e3Ssnj		     __FUNCTION__, bo->handle, bytes(bo), -err));
551813957e3Ssnj		return NULL;
552813957e3Ssnj	}
553813957e3Ssnj
554813957e3Ssnj	VG(VALGRIND_MAKE_MEM_DEFINED(mmap_arg.addr_ptr, bytes(bo)));
555813957e3Ssnj
556813957e3Ssnj	DBG(("%s: caching CPU vma for %d\n", __FUNCTION__, bo->handle));
557813957e3Ssnj	return bo->map__cpu = (void *)(uintptr_t)mmap_arg.addr_ptr;
55803b705cfSriastradh}
55903b705cfSriastradh
5609a906b70Schristosstatic int gem_write(int fd, uint32_t handle,
5619a906b70Schristos		     int offset, int length,
5629a906b70Schristos		     const void *src)
56303b705cfSriastradh{
56403b705cfSriastradh	struct drm_i915_gem_pwrite pwrite;
56503b705cfSriastradh
56603b705cfSriastradh	DBG(("%s(handle=%d, offset=%d, len=%d)\n", __FUNCTION__,
56703b705cfSriastradh	     handle, offset, length));
56803b705cfSriastradh
56903b705cfSriastradh	VG_CLEAR(pwrite);
57003b705cfSriastradh	pwrite.handle = handle;
57103b705cfSriastradh	pwrite.offset = offset;
57203b705cfSriastradh	pwrite.size = length;
57303b705cfSriastradh	pwrite.data_ptr = (uintptr_t)src;
5749a906b70Schristos	return do_ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
57503b705cfSriastradh}
57603b705cfSriastradh
5779a906b70Schristosstatic int gem_write__cachealigned(int fd, uint32_t handle,
5789a906b70Schristos				   int offset, int length,
5799a906b70Schristos				   const void *src)
58003b705cfSriastradh{
58103b705cfSriastradh	struct drm_i915_gem_pwrite pwrite;
58203b705cfSriastradh
58303b705cfSriastradh	DBG(("%s(handle=%d, offset=%d, len=%d)\n", __FUNCTION__,
58403b705cfSriastradh	     handle, offset, length));
58503b705cfSriastradh
58603b705cfSriastradh	VG_CLEAR(pwrite);
58703b705cfSriastradh	pwrite.handle = handle;
58803b705cfSriastradh	/* align the transfer to cachelines; fortuitously this is safe! */
58903b705cfSriastradh	if ((offset | length) & 63) {
59003b705cfSriastradh		pwrite.offset = offset & ~63;
59103b705cfSriastradh		pwrite.size = ALIGN(offset+length, 64) - pwrite.offset;
59203b705cfSriastradh		pwrite.data_ptr = (uintptr_t)src + pwrite.offset - offset;
59303b705cfSriastradh	} else {
59403b705cfSriastradh		pwrite.offset = offset;
59503b705cfSriastradh		pwrite.size = length;
59603b705cfSriastradh		pwrite.data_ptr = (uintptr_t)src;
59703b705cfSriastradh	}
5989a906b70Schristos	return do_ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
59903b705cfSriastradh}
60003b705cfSriastradh
60103b705cfSriastradhstatic int gem_read(int fd, uint32_t handle, const void *dst,
60203b705cfSriastradh		    int offset, int length)
60303b705cfSriastradh{
60403b705cfSriastradh	struct drm_i915_gem_pread pread;
60503b705cfSriastradh	int ret;
60603b705cfSriastradh
60703b705cfSriastradh	DBG(("%s(handle=%d, len=%d)\n", __FUNCTION__,
60803b705cfSriastradh	     handle, length));
60903b705cfSriastradh
61003b705cfSriastradh	VG_CLEAR(pread);
61103b705cfSriastradh	pread.handle = handle;
61203b705cfSriastradh	pread.offset = offset;
61303b705cfSriastradh	pread.size = length;
61403b705cfSriastradh	pread.data_ptr = (uintptr_t)dst;
6159a906b70Schristos	ret = do_ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
61603b705cfSriastradh	if (ret) {
6179a906b70Schristos		DBG(("%s: failed, errno=%d\n", __FUNCTION__, -ret));
61803b705cfSriastradh		return ret;
61903b705cfSriastradh	}
62003b705cfSriastradh
62103b705cfSriastradh	VG(VALGRIND_MAKE_MEM_DEFINED(dst, length));
62203b705cfSriastradh	return 0;
62303b705cfSriastradh}
62403b705cfSriastradh
62503b705cfSriastradhbool __kgem_busy(struct kgem *kgem, int handle)
62603b705cfSriastradh{
62703b705cfSriastradh	struct drm_i915_gem_busy busy;
62803b705cfSriastradh
62903b705cfSriastradh	VG_CLEAR(busy);
63003b705cfSriastradh	busy.handle = handle;
63103b705cfSriastradh	busy.busy = !kgem->wedged;
6329a906b70Schristos	(void)do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
63303b705cfSriastradh	DBG(("%s: handle=%d, busy=%d, wedged=%d\n",
63403b705cfSriastradh	     __FUNCTION__, handle, busy.busy, kgem->wedged));
63503b705cfSriastradh
63603b705cfSriastradh	return busy.busy;
63703b705cfSriastradh}
63803b705cfSriastradh
/* Forcibly retire @bo: clear its busy tracking and run a retire pass,
 * asserting afterwards that it really is idle.  Unlike
 * kgem_bo_maybe_retire() below, this does not first ask the kernel
 * whether the bo is still busy. */
static void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: retiring bo handle=%d (needed flush? %d), rq? %d [busy?=%d]\n",
	     __FUNCTION__, bo->handle, bo->needs_flush, bo->rq != NULL,
	     __kgem_busy(kgem, bo->handle)));
	assert(bo->exec == NULL);
	assert(list_is_empty(&bo->vma));

	if (bo->rq) {
		__kgem_bo_clear_busy(bo);
		kgem_retire(kgem);
		assert_bo_retired(bo);
	} else {
		/* Already idle: just sanity-check the bookkeeping. */
		assert(bo->exec == NULL);
		assert(list_is_empty(&bo->request));
		assert(!bo->needs_flush);
		ASSERT_IDLE(kgem, bo->handle);
	}
}
6589a906b70Schristos
6599a906b70Schristosstatic void kgem_bo_maybe_retire(struct kgem *kgem, struct kgem_bo *bo)
6609a906b70Schristos{
6619a906b70Schristos	DBG(("%s: retiring bo handle=%d (needed flush? %d), rq? %d [busy?=%d]\n",
6629a906b70Schristos	     __FUNCTION__, bo->handle, bo->needs_flush, bo->rq != NULL,
6639a906b70Schristos	     __kgem_busy(kgem, bo->handle)));
6649a906b70Schristos	assert(bo->exec == NULL);
6659a906b70Schristos	assert(list_is_empty(&bo->vma));
6669a906b70Schristos
66703b705cfSriastradh	if (bo->rq) {
66803b705cfSriastradh		if (!__kgem_busy(kgem, bo->handle)) {
66903b705cfSriastradh			__kgem_bo_clear_busy(bo);
67003b705cfSriastradh			kgem_retire(kgem);
67103b705cfSriastradh		}
67203b705cfSriastradh	} else {
67303b705cfSriastradh		assert(!bo->needs_flush);
67403b705cfSriastradh		ASSERT_IDLE(kgem, bo->handle);
67503b705cfSriastradh	}
67603b705cfSriastradh}
67703b705cfSriastradh
/* Upload @length bytes from @data to the start of @bo.
 *
 * Prefers a direct memcpy through an existing or freshly created CPU
 * mmap (when the bo is in the CPU domain, or LLC makes it coherent and
 * it is not a scanout) or a WC mmap; otherwise falls back to the
 * pwrite ioctl, retrying after trimming the caches on failure.
 *
 * Returns true on success.
 */
bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
		   const void *data, int length)
{
	void *ptr;
	int err;

	assert(bo->refcnt);
	assert(bo->proxy == NULL);
	ASSERT_IDLE(kgem, bo->handle);

	assert(length <= bytes(bo));
retry:
	ptr = NULL;
	if (bo->domain == DOMAIN_CPU || (kgem->has_llc && !bo->scanout)) {
		/* CPU mapping is coherent for this bo: copy directly. */
		ptr = bo->map__cpu;
		if (ptr == NULL)
			ptr = __kgem_bo_map__cpu(kgem, bo);
	} else if (kgem->has_wc_mmap) {
		ptr = bo->map__wc;
		if (ptr == NULL)
			ptr = __kgem_bo_map__wc(kgem, bo);
	}
	if (ptr) {
		/* XXX unsynchronized? */
		memcpy(ptr, data, length);
		return true;
	}

	if ((err = gem_write(kgem->fd, bo->handle, 0, length, data))) {
		assert(err != EINVAL);

		/* Presumably out of memory: reclaim and retry. */
		(void)__kgem_throttle_retire(kgem, 0);
		if (kgem_expire_cache(kgem))
			goto retry;

		if (kgem_cleanup_cache(kgem))
			goto retry;

		ERR(("%s: failed to write %d bytes into BO handle=%d: %d\n",
		     __FUNCTION__, length, bo->handle, -err));
		return false;
	}

	DBG(("%s: flush=%d, domain=%d\n", __FUNCTION__, bo->flush, bo->domain));
	if (bo->exec == NULL)
		kgem_bo_maybe_retire(kgem, bo);
	/* pwrite leaves the bo outside any fixed domain. */
	bo->domain = DOMAIN_NONE;
	bo->gtt_dirty = true;
	return true;
}
72803b705cfSriastradh
72903b705cfSriastradhstatic uint32_t gem_create(int fd, int num_pages)
73003b705cfSriastradh{
73103b705cfSriastradh	struct drm_i915_gem_create create;
73203b705cfSriastradh
73303b705cfSriastradh	VG_CLEAR(create);
73403b705cfSriastradh	create.handle = 0;
73503b705cfSriastradh	create.size = PAGE_SIZE * num_pages;
7369a906b70Schristos	(void)do_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
73703b705cfSriastradh
73803b705cfSriastradh	return create.handle;
73903b705cfSriastradh}
74003b705cfSriastradh
74103b705cfSriastradhstatic bool
74203b705cfSriastradhkgem_bo_set_purgeable(struct kgem *kgem, struct kgem_bo *bo)
74303b705cfSriastradh{
74403b705cfSriastradh#if DBG_NO_MADV
74503b705cfSriastradh	return true;
74603b705cfSriastradh#else
74703b705cfSriastradh	struct drm_i915_gem_madvise madv;
74803b705cfSriastradh
74903b705cfSriastradh	assert(bo->exec == NULL);
75003b705cfSriastradh	assert(!bo->purged);
75103b705cfSriastradh
75203b705cfSriastradh	VG_CLEAR(madv);
75303b705cfSriastradh	madv.handle = bo->handle;
75403b705cfSriastradh	madv.madv = I915_MADV_DONTNEED;
7559a906b70Schristos	if (do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0) {
75603b705cfSriastradh		bo->purged = 1;
75703b705cfSriastradh		kgem->need_purge |= !madv.retained && bo->domain == DOMAIN_GPU;
75803b705cfSriastradh		return madv.retained;
75903b705cfSriastradh	}
76003b705cfSriastradh
76103b705cfSriastradh	return true;
76203b705cfSriastradh#endif
76303b705cfSriastradh}
76403b705cfSriastradh
76503b705cfSriastradhstatic bool
76603b705cfSriastradhkgem_bo_is_retained(struct kgem *kgem, struct kgem_bo *bo)
76703b705cfSriastradh{
76803b705cfSriastradh#if DBG_NO_MADV
76903b705cfSriastradh	return true;
77003b705cfSriastradh#else
77103b705cfSriastradh	struct drm_i915_gem_madvise madv;
77203b705cfSriastradh
77303b705cfSriastradh	if (!bo->purged)
77403b705cfSriastradh		return true;
77503b705cfSriastradh
77603b705cfSriastradh	VG_CLEAR(madv);
77703b705cfSriastradh	madv.handle = bo->handle;
77803b705cfSriastradh	madv.madv = I915_MADV_DONTNEED;
7799a906b70Schristos	if (do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0)
78003b705cfSriastradh		return madv.retained;
78103b705cfSriastradh
78203b705cfSriastradh	return false;
78303b705cfSriastradh#endif
78403b705cfSriastradh}
78503b705cfSriastradh
78603b705cfSriastradhstatic bool
78703b705cfSriastradhkgem_bo_clear_purgeable(struct kgem *kgem, struct kgem_bo *bo)
78803b705cfSriastradh{
78903b705cfSriastradh#if DBG_NO_MADV
79003b705cfSriastradh	return true;
79103b705cfSriastradh#else
79203b705cfSriastradh	struct drm_i915_gem_madvise madv;
79303b705cfSriastradh
79403b705cfSriastradh	assert(bo->purged);
79503b705cfSriastradh
79603b705cfSriastradh	VG_CLEAR(madv);
79703b705cfSriastradh	madv.handle = bo->handle;
79803b705cfSriastradh	madv.madv = I915_MADV_WILLNEED;
7999a906b70Schristos	if (do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0) {
80003b705cfSriastradh		bo->purged = !madv.retained;
80103b705cfSriastradh		kgem->need_purge |= !madv.retained && bo->domain == DOMAIN_GPU;
80203b705cfSriastradh		return madv.retained;
80303b705cfSriastradh	}
80403b705cfSriastradh
80503b705cfSriastradh	return false;
80603b705cfSriastradh#endif
80703b705cfSriastradh}
80803b705cfSriastradh
80903b705cfSriastradhstatic void gem_close(int fd, uint32_t handle)
81003b705cfSriastradh{
81103b705cfSriastradh	struct drm_gem_close close;
81203b705cfSriastradh
81303b705cfSriastradh	VG_CLEAR(close);
81403b705cfSriastradh	close.handle = handle;
8159a906b70Schristos	(void)do_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
81603b705cfSriastradh}
81703b705cfSriastradh
81803b705cfSriastradhconstant inline static unsigned long __fls(unsigned long word)
81903b705cfSriastradh{
82003b705cfSriastradh#if defined(__GNUC__) && (defined(__i386__) || defined(__x86__) || defined(__x86_64__))
82103b705cfSriastradh	asm("bsr %1,%0"
82203b705cfSriastradh	    : "=r" (word)
82303b705cfSriastradh	    : "rm" (word));
82403b705cfSriastradh	return word;
82503b705cfSriastradh#else
82603b705cfSriastradh	unsigned int v = 0;
82703b705cfSriastradh
82803b705cfSriastradh	while (word >>= 1)
82903b705cfSriastradh		v++;
83003b705cfSriastradh
83103b705cfSriastradh	return v;
83203b705cfSriastradh#endif
83303b705cfSriastradh}
83403b705cfSriastradh
83503b705cfSriastradhconstant inline static int cache_bucket(int num_pages)
83603b705cfSriastradh{
83703b705cfSriastradh	return __fls(num_pages);
83803b705cfSriastradh}
83903b705cfSriastradh
84003b705cfSriastradhstatic struct kgem_bo *__kgem_bo_init(struct kgem_bo *bo,
84103b705cfSriastradh				      int handle, int num_pages)
84203b705cfSriastradh{
8439a906b70Schristos	DBG(("%s(handle=%d, num_pages=%d)\n", __FUNCTION__, handle, num_pages));
8449a906b70Schristos
84503b705cfSriastradh	assert(num_pages);
84603b705cfSriastradh	memset(bo, 0, sizeof(*bo));
84703b705cfSriastradh
84803b705cfSriastradh	bo->refcnt = 1;
84903b705cfSriastradh	bo->handle = handle;
85003b705cfSriastradh	bo->target_handle = -1;
85103b705cfSriastradh	num_pages(bo) = num_pages;
85203b705cfSriastradh	bucket(bo) = cache_bucket(num_pages);
85303b705cfSriastradh	bo->reusable = true;
85403b705cfSriastradh	bo->domain = DOMAIN_CPU;
85503b705cfSriastradh	list_init(&bo->request);
85603b705cfSriastradh	list_init(&bo->list);
85703b705cfSriastradh	list_init(&bo->vma);
85803b705cfSriastradh
85903b705cfSriastradh	return bo;
86003b705cfSriastradh}
86103b705cfSriastradh
86203b705cfSriastradhstatic struct kgem_bo *__kgem_bo_alloc(int handle, int num_pages)
86303b705cfSriastradh{
86403b705cfSriastradh	struct kgem_bo *bo;
86503b705cfSriastradh
86603b705cfSriastradh	if (__kgem_freed_bo) {
86703b705cfSriastradh		bo = __kgem_freed_bo;
86803b705cfSriastradh		__kgem_freed_bo = *(struct kgem_bo **)bo;
86903b705cfSriastradh	} else {
87003b705cfSriastradh		bo = malloc(sizeof(*bo));
87103b705cfSriastradh		if (bo == NULL)
87203b705cfSriastradh			return NULL;
87303b705cfSriastradh	}
87403b705cfSriastradh
87503b705cfSriastradh	return __kgem_bo_init(bo, handle, num_pages);
87603b705cfSriastradh}
87703b705cfSriastradh
87803b705cfSriastradhstatic struct kgem_request *__kgem_request_alloc(struct kgem *kgem)
87903b705cfSriastradh{
88003b705cfSriastradh	struct kgem_request *rq;
88103b705cfSriastradh
88203b705cfSriastradh	rq = __kgem_freed_request;
88303b705cfSriastradh	if (rq) {
88403b705cfSriastradh		__kgem_freed_request = *(struct kgem_request **)rq;
88503b705cfSriastradh	} else {
88603b705cfSriastradh		rq = malloc(sizeof(*rq));
88703b705cfSriastradh		if (rq == NULL)
88803b705cfSriastradh			rq = &kgem->static_request;
88903b705cfSriastradh	}
89003b705cfSriastradh
89103b705cfSriastradh	list_init(&rq->buffers);
89203b705cfSriastradh	rq->bo = NULL;
89303b705cfSriastradh	rq->ring = 0;
89403b705cfSriastradh
89503b705cfSriastradh	return rq;
89603b705cfSriastradh}
89703b705cfSriastradh
89803b705cfSriastradhstatic void __kgem_request_free(struct kgem_request *rq)
89903b705cfSriastradh{
90003b705cfSriastradh	_list_del(&rq->list);
9019a906b70Schristos	if (DBG_NO_MALLOC_CACHE) {
9029a906b70Schristos		free(rq);
9039a906b70Schristos	} else {
9049a906b70Schristos		*(struct kgem_request **)rq = __kgem_freed_request;
9059a906b70Schristos		__kgem_freed_request = rq;
9069a906b70Schristos	}
90703b705cfSriastradh}
90803b705cfSriastradh
90903b705cfSriastradhstatic struct list *inactive(struct kgem *kgem, int num_pages)
91003b705cfSriastradh{
91103b705cfSriastradh	assert(num_pages < MAX_CACHE_SIZE / PAGE_SIZE);
91203b705cfSriastradh	assert(cache_bucket(num_pages) < NUM_CACHE_BUCKETS);
91303b705cfSriastradh	return &kgem->inactive[cache_bucket(num_pages)];
91403b705cfSriastradh}
91503b705cfSriastradh
91603b705cfSriastradhstatic struct list *active(struct kgem *kgem, int num_pages, int tiling)
91703b705cfSriastradh{
91803b705cfSriastradh	assert(num_pages < MAX_CACHE_SIZE / PAGE_SIZE);
91903b705cfSriastradh	assert(cache_bucket(num_pages) < NUM_CACHE_BUCKETS);
92003b705cfSriastradh	return &kgem->active[cache_bucket(num_pages)][tiling];
92103b705cfSriastradh}
92203b705cfSriastradh
92303b705cfSriastradhstatic size_t
92403b705cfSriastradhagp_aperture_size(struct pci_device *dev, unsigned gen)
92503b705cfSriastradh{
92603b705cfSriastradh	/* XXX assume that only future chipsets are unknown and follow
92703b705cfSriastradh	 * the post gen2 PCI layout.
92803b705cfSriastradh	 */
92903b705cfSriastradh	return dev->regions[gen < 030 ? 0 : 2].size;
93003b705cfSriastradh}
93103b705cfSriastradh
/* Best-effort query of the amount of physical RAM, in bytes.
 * Tries sysinfo(2) first, then the POSIX sysconf interface;
 * returns 0 if neither is available.
 */
static size_t
total_ram_size(void)
{
#ifdef HAVE_STRUCT_SYSINFO_TOTALRAM
	struct sysinfo info;

	if (sysinfo(&info) == 0)
		return info.totalram * info.mem_unit;
#endif

#ifdef _SC_PHYS_PAGES
	return sysconf(_SC_PHYS_PAGES) * sysconf(_SC_PAGE_SIZE);
#endif

	return 0;
}
94703b705cfSriastradh
94803b705cfSriastradhstatic unsigned
94903b705cfSriastradhcpu_cache_size__cpuid4(void)
95003b705cfSriastradh{
9519a906b70Schristos	/* Deterministic Cache Parameters (Function 04h)":
95203b705cfSriastradh	 *    When EAX is initialized to a value of 4, the CPUID instruction
95303b705cfSriastradh	 *    returns deterministic cache information in the EAX, EBX, ECX
95403b705cfSriastradh	 *    and EDX registers.  This function requires ECX be initialized
95503b705cfSriastradh	 *    with an index which indicates which cache to return information
95603b705cfSriastradh	 *    about. The OS is expected to call this function (CPUID.4) with
95703b705cfSriastradh	 *    ECX = 0, 1, 2, until EAX[4:0] == 0, indicating no more caches.
95803b705cfSriastradh	 *    The order in which the caches are returned is not specified
95903b705cfSriastradh	 *    and may change at Intel's discretion.
96003b705cfSriastradh	 *
96103b705cfSriastradh	 * Calculating the Cache Size in bytes:
96203b705cfSriastradh	 *          = (Ways +1) * (Partitions +1) * (Line Size +1) * (Sets +1)
96303b705cfSriastradh	 */
96403b705cfSriastradh
96503b705cfSriastradh	 unsigned int eax, ebx, ecx, edx;
96603b705cfSriastradh	 unsigned int llc_size = 0;
9679a906b70Schristos	 int cnt;
96803b705cfSriastradh
96903b705cfSriastradh	 if (__get_cpuid_max(BASIC_CPUID, NULL) < 4)
97003b705cfSriastradh		 return 0;
97103b705cfSriastradh
9729a906b70Schristos	 cnt = 0;
97303b705cfSriastradh	 do {
97403b705cfSriastradh		 unsigned associativity, line_partitions, line_size, sets;
97503b705cfSriastradh
97603b705cfSriastradh		 __cpuid_count(4, cnt++, eax, ebx, ecx, edx);
97703b705cfSriastradh
97803b705cfSriastradh		 if ((eax & 0x1f) == 0)
97903b705cfSriastradh			 break;
98003b705cfSriastradh
98103b705cfSriastradh		 associativity = ((ebx >> 22) & 0x3ff) + 1;
98203b705cfSriastradh		 line_partitions = ((ebx >> 12) & 0x3ff) + 1;
98303b705cfSriastradh		 line_size = (ebx & 0xfff) + 1;
98403b705cfSriastradh		 sets = ecx + 1;
98503b705cfSriastradh
98603b705cfSriastradh		 llc_size = associativity * line_partitions * line_size * sets;
98703b705cfSriastradh	 } while (1);
98803b705cfSriastradh
98903b705cfSriastradh	 return llc_size;
99003b705cfSriastradh}
99103b705cfSriastradh
/* Determine the CPU last-level cache size: first via CPUID, then by
 * parsing /proc/cpuinfo, finally defaulting to 64KiB.
 */
static unsigned
cpu_cache_size(void)
{
	unsigned size = cpu_cache_size__cpuid4();
	FILE *file;

	if (size)
		return size;

	file = fopen("/proc/cpuinfo", "r");
	if (file) {
		char *line = NULL;
		size_t len = 0;

		while (getline(&line, &len, file) != -1) {
			int kb;

			if (sscanf(line, "cache size : %d KB", &kb) != 1)
				continue;

			/* Paranoid check against gargantuan caches */
			if (kb <= 1<<20)
				size = kb * 1024;
			break;
		}
		free(line);
		fclose(file);
	}

	return size ? size : 64 * 1024;
}
102403b705cfSriastradh
102503b705cfSriastradhstatic int gem_param(struct kgem *kgem, int name)
102603b705cfSriastradh{
102703b705cfSriastradh	drm_i915_getparam_t gp;
102803b705cfSriastradh	int v = -1; /* No param uses the sign bit, reserve it for errors */
102903b705cfSriastradh
103003b705cfSriastradh	VG_CLEAR(gp);
103103b705cfSriastradh	gp.param = name;
103203b705cfSriastradh	gp.value = &v;
10339a906b70Schristos	if (do_ioctl(kgem->fd, DRM_IOCTL_I915_GETPARAM, &gp))
103403b705cfSriastradh		return -1;
103503b705cfSriastradh
103603b705cfSriastradh	VG(VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v)));
103703b705cfSriastradh	return v;
103803b705cfSriastradh}
103903b705cfSriastradh
104003b705cfSriastradhstatic bool test_has_execbuffer2(struct kgem *kgem)
104103b705cfSriastradh{
104203b705cfSriastradh	struct drm_i915_gem_execbuffer2 execbuf;
104303b705cfSriastradh
104403b705cfSriastradh	memset(&execbuf, 0, sizeof(execbuf));
104503b705cfSriastradh	execbuf.buffer_count = 1;
104603b705cfSriastradh
10479a906b70Schristos	return do_ioctl(kgem->fd,
104803b705cfSriastradh			 DRM_IOCTL_I915_GEM_EXECBUFFER2,
10499a906b70Schristos			 &execbuf) == -EFAULT;
105003b705cfSriastradh}
105103b705cfSriastradh
105203b705cfSriastradhstatic bool test_has_no_reloc(struct kgem *kgem)
105303b705cfSriastradh{
105403b705cfSriastradh	if (DBG_NO_FAST_RELOC)
105503b705cfSriastradh		return false;
105603b705cfSriastradh
105703b705cfSriastradh	return gem_param(kgem, LOCAL_I915_PARAM_HAS_NO_RELOC) > 0;
105803b705cfSriastradh}
105903b705cfSriastradh
106003b705cfSriastradhstatic bool test_has_handle_lut(struct kgem *kgem)
106103b705cfSriastradh{
106203b705cfSriastradh	if (DBG_NO_HANDLE_LUT)
106303b705cfSriastradh		return false;
106403b705cfSriastradh
106503b705cfSriastradh	return gem_param(kgem, LOCAL_I915_PARAM_HAS_HANDLE_LUT) > 0;
106603b705cfSriastradh}
106703b705cfSriastradh
106803b705cfSriastradhstatic bool test_has_wt(struct kgem *kgem)
106903b705cfSriastradh{
107003b705cfSriastradh	if (DBG_NO_WT)
107103b705cfSriastradh		return false;
107203b705cfSriastradh
107303b705cfSriastradh	return gem_param(kgem, LOCAL_I915_PARAM_HAS_WT) > 0;
107403b705cfSriastradh}
107503b705cfSriastradh
107603b705cfSriastradhstatic bool test_has_semaphores_enabled(struct kgem *kgem)
107703b705cfSriastradh{
107803b705cfSriastradh	FILE *file;
107903b705cfSriastradh	bool detected = false;
108003b705cfSriastradh	int ret;
108103b705cfSriastradh
108203b705cfSriastradh	if (DBG_NO_SEMAPHORES)
108303b705cfSriastradh		return false;
108403b705cfSriastradh
108503b705cfSriastradh	ret = gem_param(kgem, LOCAL_I915_PARAM_HAS_SEMAPHORES);
108603b705cfSriastradh	if (ret != -1)
108703b705cfSriastradh		return ret > 0;
108803b705cfSriastradh
108903b705cfSriastradh	file = fopen("/sys/module/i915/parameters/semaphores", "r");
109003b705cfSriastradh	if (file) {
109103b705cfSriastradh		int value;
109203b705cfSriastradh		if (fscanf(file, "%d", &value) == 1)
109303b705cfSriastradh			detected = value != 0;
109403b705cfSriastradh		fclose(file);
109503b705cfSriastradh	}
109603b705cfSriastradh
109703b705cfSriastradh	return detected;
109803b705cfSriastradh}
109903b705cfSriastradh
110003b705cfSriastradhstatic bool is_hw_supported(struct kgem *kgem,
110103b705cfSriastradh			    struct pci_device *dev)
110203b705cfSriastradh{
110303b705cfSriastradh	if (DBG_NO_HW)
110403b705cfSriastradh		return false;
110503b705cfSriastradh
110603b705cfSriastradh	if (!test_has_execbuffer2(kgem))
110703b705cfSriastradh		return false;
110803b705cfSriastradh
110903b705cfSriastradh	if (kgem->gen == (unsigned)-1) /* unknown chipset, assume future gen */
111003b705cfSriastradh		return kgem->has_blt;
111103b705cfSriastradh
111203b705cfSriastradh	/* Although pre-855gm the GMCH is fubar, it works mostly. So
111303b705cfSriastradh	 * let the user decide through "NoAccel" whether or not to risk
111403b705cfSriastradh	 * hw acceleration.
111503b705cfSriastradh	 */
111603b705cfSriastradh
11179a906b70Schristos	if (kgem->gen == 060 && dev && dev->revision < 8) {
111803b705cfSriastradh		/* pre-production SNB with dysfunctional BLT */
111903b705cfSriastradh		return false;
112003b705cfSriastradh	}
112103b705cfSriastradh
112203b705cfSriastradh	if (kgem->gen >= 060) /* Only if the kernel supports the BLT ring */
112303b705cfSriastradh		return kgem->has_blt;
112403b705cfSriastradh
112503b705cfSriastradh	return true;
112603b705cfSriastradh}
112703b705cfSriastradh
112803b705cfSriastradhstatic bool test_has_relaxed_fencing(struct kgem *kgem)
112903b705cfSriastradh{
113003b705cfSriastradh	if (kgem->gen < 040) {
113103b705cfSriastradh		if (DBG_NO_RELAXED_FENCING)
113203b705cfSriastradh			return false;
113303b705cfSriastradh
113403b705cfSriastradh		return gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_FENCING) > 0;
113503b705cfSriastradh	} else
113603b705cfSriastradh		return true;
113703b705cfSriastradh}
113803b705cfSriastradh
113903b705cfSriastradhstatic bool test_has_llc(struct kgem *kgem)
114003b705cfSriastradh{
114103b705cfSriastradh	int has_llc = -1;
114203b705cfSriastradh
114303b705cfSriastradh	if (DBG_NO_LLC)
114403b705cfSriastradh		return false;
114503b705cfSriastradh
1146813957e3Ssnj	has_llc = gem_param(kgem, LOCAL_I915_PARAM_HAS_LLC);
114703b705cfSriastradh	if (has_llc == -1) {
114803b705cfSriastradh		DBG(("%s: no kernel/drm support for HAS_LLC, assuming support for LLC based on GPU generation\n", __FUNCTION__));
114903b705cfSriastradh		has_llc = kgem->gen >= 060;
115003b705cfSriastradh	}
115103b705cfSriastradh
115203b705cfSriastradh	return has_llc;
115303b705cfSriastradh}
115403b705cfSriastradh
1155813957e3Ssnjstatic bool test_has_wc_mmap(struct kgem *kgem)
1156813957e3Ssnj{
1157813957e3Ssnj	struct local_i915_gem_mmap2 wc;
1158813957e3Ssnj	bool ret;
1159813957e3Ssnj
1160813957e3Ssnj	if (DBG_NO_WC_MMAP)
1161813957e3Ssnj		return false;
1162813957e3Ssnj
1163813957e3Ssnj	if (gem_param(kgem, LOCAL_I915_PARAM_MMAP_VERSION) < 1)
1164813957e3Ssnj		return false;
1165813957e3Ssnj
1166813957e3Ssnj	VG_CLEAR(wc);
1167813957e3Ssnj	wc.handle = gem_create(kgem->fd, 1);
1168813957e3Ssnj	wc.offset = 0;
1169813957e3Ssnj	wc.size = 4096;
1170813957e3Ssnj	wc.flags = I915_MMAP_WC;
1171813957e3Ssnj	ret = do_ioctl(kgem->fd, LOCAL_IOCTL_I915_GEM_MMAP_v2, &wc) == 0;
1172813957e3Ssnj	gem_close(kgem->fd, wc.handle);
1173813957e3Ssnj
1174813957e3Ssnj	return ret;
1175813957e3Ssnj}
1176813957e3Ssnj
117703b705cfSriastradhstatic bool test_has_caching(struct kgem *kgem)
117803b705cfSriastradh{
117903b705cfSriastradh	uint32_t handle;
118003b705cfSriastradh	bool ret;
118103b705cfSriastradh
118203b705cfSriastradh	if (DBG_NO_CACHE_LEVEL)
118303b705cfSriastradh		return false;
118403b705cfSriastradh
118503b705cfSriastradh	/* Incoherent blt and sampler hangs the GPU */
118603b705cfSriastradh	if (kgem->gen == 040)
118703b705cfSriastradh		return false;
118803b705cfSriastradh
118903b705cfSriastradh	handle = gem_create(kgem->fd, 1);
119003b705cfSriastradh	if (handle == 0)
119103b705cfSriastradh		return false;
119203b705cfSriastradh
119303b705cfSriastradh	ret = gem_set_caching(kgem->fd, handle, UNCACHED);
119403b705cfSriastradh	gem_close(kgem->fd, handle);
119503b705cfSriastradh	return ret;
119603b705cfSriastradh}
119703b705cfSriastradh
119803b705cfSriastradhstatic bool test_has_userptr(struct kgem *kgem)
119903b705cfSriastradh{
120003b705cfSriastradh	uint32_t handle;
120103b705cfSriastradh	void *ptr;
120203b705cfSriastradh
120303b705cfSriastradh	if (DBG_NO_USERPTR)
120403b705cfSriastradh		return false;
120503b705cfSriastradh
120603b705cfSriastradh	/* Incoherent blt and sampler hangs the GPU */
120703b705cfSriastradh	if (kgem->gen == 040)
120803b705cfSriastradh		return false;
120903b705cfSriastradh
121003b705cfSriastradh	if (posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE))
121103b705cfSriastradh		return false;
121203b705cfSriastradh
121303b705cfSriastradh	handle = gem_userptr(kgem->fd, ptr, PAGE_SIZE, false);
121403b705cfSriastradh	gem_close(kgem->fd, handle);
121503b705cfSriastradh	free(ptr);
121603b705cfSriastradh
121703b705cfSriastradh	return handle != 0;
121803b705cfSriastradh}
121903b705cfSriastradh
122003b705cfSriastradhstatic bool test_has_create2(struct kgem *kgem)
122103b705cfSriastradh{
122203b705cfSriastradh#if defined(USE_CREATE2)
122303b705cfSriastradh	struct local_i915_gem_create2 args;
122403b705cfSriastradh
122503b705cfSriastradh	if (DBG_NO_CREATE2)
122603b705cfSriastradh		return false;
122703b705cfSriastradh
122803b705cfSriastradh	memset(&args, 0, sizeof(args));
122903b705cfSriastradh	args.size = PAGE_SIZE;
123003b705cfSriastradh	args.caching = DISPLAY;
12319a906b70Schristos	if (do_ioctl(kgem->fd, LOCAL_IOCTL_I915_GEM_CREATE2, &args) == 0)
123203b705cfSriastradh		gem_close(kgem->fd, args.handle);
123303b705cfSriastradh
123403b705cfSriastradh	return args.handle != 0;
123503b705cfSriastradh#else
123603b705cfSriastradh	return false;
123703b705cfSriastradh#endif
123803b705cfSriastradh}
123903b705cfSriastradh
124003b705cfSriastradhstatic bool test_has_secure_batches(struct kgem *kgem)
124103b705cfSriastradh{
124203b705cfSriastradh	if (DBG_NO_SECURE_BATCHES)
124303b705cfSriastradh		return false;
124403b705cfSriastradh
124503b705cfSriastradh	return gem_param(kgem, LOCAL_I915_PARAM_HAS_SECURE_BATCHES) > 0;
124603b705cfSriastradh}
124703b705cfSriastradh
124803b705cfSriastradhstatic bool test_has_pinned_batches(struct kgem *kgem)
124903b705cfSriastradh{
125003b705cfSriastradh	if (DBG_NO_PINNED_BATCHES)
125103b705cfSriastradh		return false;
125203b705cfSriastradh
125303b705cfSriastradh	return gem_param(kgem, LOCAL_I915_PARAM_HAS_PINNED_BATCHES) > 0;
125403b705cfSriastradh}
125503b705cfSriastradh
125603b705cfSriastradhstatic int kgem_get_screen_index(struct kgem *kgem)
125703b705cfSriastradh{
125803b705cfSriastradh	struct sna *sna = container_of(kgem, struct sna, kgem);
125903b705cfSriastradh	return sna->scrn->scrnIndex;
126003b705cfSriastradh}
126103b705cfSriastradh
12629a906b70Schristosstatic int __find_debugfs(struct kgem *kgem)
12639a906b70Schristos{
12649a906b70Schristos	int i;
12659a906b70Schristos
12669a906b70Schristos	for (i = 0; i < DRM_MAX_MINOR; i++) {
12679a906b70Schristos		char path[80];
12689a906b70Schristos
12699a906b70Schristos		sprintf(path, "/sys/kernel/debug/dri/%d/i915_wedged", i);
12709a906b70Schristos		if (access(path, R_OK) == 0)
12719a906b70Schristos			return i;
12729a906b70Schristos
12739a906b70Schristos		sprintf(path, "/debug/dri/%d/i915_wedged", i);
12749a906b70Schristos		if (access(path, R_OK) == 0)
12759a906b70Schristos			return i;
12769a906b70Schristos	}
12779a906b70Schristos
12789a906b70Schristos	return -1;
12799a906b70Schristos}
12809a906b70Schristos
12819a906b70Schristosstatic int kgem_get_minor(struct kgem *kgem)
12829a906b70Schristos{
12839a906b70Schristos	struct stat st;
12849a906b70Schristos
12859a906b70Schristos	if (fstat(kgem->fd, &st))
12869a906b70Schristos		return __find_debugfs(kgem);
12879a906b70Schristos
12889a906b70Schristos	if (!S_ISCHR(st.st_mode))
12899a906b70Schristos		return __find_debugfs(kgem);
12909a906b70Schristos
12919a906b70Schristos	return st.st_rdev & 0x63;
12929a906b70Schristos}
12939a906b70Schristos
/* Pre-allocate pools of pinned batch buffers: 16 one-page and 4
 * four-page buffers. Pinning keeps the batches resident at a fixed GTT
 * offset. On any failure, everything allocated so far is destroyed and
 * each pool is repopulated with a single unpinned bo instead; returns
 * false in that case so the caller knows pinning is unavailable.
 */
static bool kgem_init_pinned_batches(struct kgem *kgem)
{
	int count[2] = { 16, 4 };
	int size[2] = { 1, 4 };
	int n, i;

	/* No point preparing batches when the GPU is wedged. */
	if (kgem->wedged)
		return true;

	for (n = 0; n < ARRAY_SIZE(count); n++) {
		for (i = 0; i < count[n]; i++) {
			struct drm_i915_gem_pin pin;
			struct kgem_bo *bo;

			VG_CLEAR(pin);

			pin.handle = gem_create(kgem->fd, size[n]);
			if (pin.handle == 0)
				goto err;

			DBG(("%s: new handle=%d, num_pages=%d\n",
			     __FUNCTION__, pin.handle, size[n]));

			bo = __kgem_bo_alloc(pin.handle, size[n]);
			if (bo == NULL) {
				gem_close(kgem->fd, pin.handle);
				goto err;
			}

			pin.alignment = 0;
			if (do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_PIN, &pin)) {
				gem_close(kgem->fd, pin.handle);
				/* bo was never published; free directly. */
				free(bo);
				goto err;
			}
			bo->presumed_offset = pin.offset;
			debug_alloc__bo(kgem, bo);
			list_add(&bo->list, &kgem->pinned_batches[n]);
		}
	}

	return true;

err:
	/* Unwind: destroy every pinned bo created before the failure. */
	for (n = 0; n < ARRAY_SIZE(kgem->pinned_batches); n++) {
		while (!list_is_empty(&kgem->pinned_batches[n])) {
			kgem_bo_destroy(kgem,
					list_first_entry(&kgem->pinned_batches[n],
							 struct kgem_bo, list));
		}
	}

	/* For simplicity populate the lists with a single unpinned bo */
	for (n = 0; n < ARRAY_SIZE(count); n++) {
		struct kgem_bo *bo;
		uint32_t handle;

		handle = gem_create(kgem->fd, size[n]);
		if (handle == 0)
			break;

		bo = __kgem_bo_alloc(handle, size[n]);
		if (bo == NULL) {
			gem_close(kgem->fd, handle);
			break;
		}

		debug_alloc__bo(kgem, bo);
		list_add(&bo->list, &kgem->pinned_batches[n]);
	}
	return false;
}
136603b705cfSriastradh
/* Detect the bit6 swizzling mode used for X-tiled surfaces and, when
 * the CPU view matches the physical layout, select an optimised
 * memcpy for tiled uploads. Uses a scratch bo that is always closed
 * on exit.
 */
static void kgem_init_swizzling(struct kgem *kgem)
{
	/* Local mirror of the v2 get_tiling ioctl struct, which adds
	 * phys_swizzle_mode on newer kernels. */
	struct local_i915_gem_get_tiling_v2 {
		uint32_t handle;
		uint32_t tiling_mode;
		uint32_t swizzle_mode;
		uint32_t phys_swizzle_mode;
	} tiling;
#define LOCAL_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct local_i915_gem_get_tiling_v2)

	VG_CLEAR(tiling);
	tiling.handle = gem_create(kgem->fd, 1);
	if (!tiling.handle)
		return;

	if (!gem_set_tiling(kgem->fd, tiling.handle, I915_TILING_X, 512))
		goto out;

	if (do_ioctl(kgem->fd, LOCAL_IOCTL_I915_GEM_GET_TILING, &tiling))
		goto out;

	/* NOTE(review): gen constants elsewhere in this file are octal
	 * (040, 060, ...); the decimal 50 here sits between 060 and 070
	 * in that encoding — confirm the intended generation threshold.
	 */
	if (kgem->gen < 50 && tiling.phys_swizzle_mode != tiling.swizzle_mode)
		goto out;

	choose_memcpy_tiled_x(kgem, tiling.swizzle_mode);
out:
	gem_close(kgem->fd, tiling.handle);
}
139503b705cfSriastradh
/* Resolve the batch's self-referential relocations now that the final
 * batch bo (and so its presumed offset) is known, optionally rebasing
 * instruction offsets after the batch was shrunk by 'shrink' bytes.
 */
static void kgem_fixup_relocs(struct kgem *kgem, struct kgem_bo *bo, int shrink)
{
	int n;

	/* With HANDLE_LUT the execbuffer refers to buffers by exec-list
	 * index; otherwise by raw GEM handle. */
	bo->target_handle = kgem->has_handle_lut ? kgem->nexec : bo->handle;

	assert(kgem->nreloc__self <= 256);
	if (kgem->nreloc__self == 0)
		return;

	DBG(("%s: fixing up %d%s self-relocations to handle=%p, presumed-offset=%llx\n",
	     __FUNCTION__, kgem->nreloc__self,
	     kgem->nreloc__self == 256 ? "+" : "",
	     bo->handle, (long long)bo->presumed_offset));
	for (n = 0; n < kgem->nreloc__self; n++) {
		int i = kgem->reloc__self[n];

		/* Self-relocs were recorded with a sentinel target. */
		assert(kgem->reloc[i].target_handle == ~0U);
		kgem->reloc[i].target_handle = bo->target_handle;
		kgem->reloc[i].presumed_offset = bo->presumed_offset;

		if (kgem->reloc[i].read_domains == I915_GEM_DOMAIN_INSTRUCTION) {
			DBG(("%s: moving base of self-reloc[%d:%d] %d -> %d\n",
			     __FUNCTION__, n, i,
			     kgem->reloc[i].delta,
			     kgem->reloc[i].delta - shrink));

			kgem->reloc[i].delta -= shrink;
		}
		/* Patch the batch dword with the best-guess address. */
		kgem->batch[kgem->reloc[i].offset/sizeof(uint32_t)] =
			kgem->reloc[i].delta + bo->presumed_offset;
	}

	/* reloc__self[] only tracks 256 entries; if it overflowed, sweep
	 * the remaining relocs for unresolved sentinels. */
	if (n == 256) {
		for (n = kgem->reloc__self[255]; n < kgem->nreloc; n++) {
			if (kgem->reloc[n].target_handle == ~0U) {
				kgem->reloc[n].target_handle = bo->target_handle;
				kgem->reloc[n].presumed_offset = bo->presumed_offset;

				if (kgem->reloc[n].read_domains == I915_GEM_DOMAIN_INSTRUCTION) {
					DBG(("%s: moving base of reloc[%d] %d -> %d\n",
					     __FUNCTION__, n,
					     kgem->reloc[n].delta,
					     kgem->reloc[n].delta - shrink));
					kgem->reloc[n].delta -= shrink;
				}
				kgem->batch[kgem->reloc[n].offset/sizeof(uint32_t)] =
					kgem->reloc[n].delta + bo->presumed_offset;
			}
		}
	}

	if (shrink) {
		/* The batch contents moved down; shift reloc offsets that
		 * lie beyond the (already moved) batch prefix. */
		DBG(("%s: shrinking by %d\n", __FUNCTION__, shrink));
		for (n = 0; n < kgem->nreloc; n++) {
			if (kgem->reloc[n].offset >= sizeof(uint32_t)*kgem->nbatch)
				kgem->reloc[n].offset -= shrink;
		}
	}
}
1456813957e3Ssnj
1457813957e3Ssnjstatic struct kgem_bo *kgem_new_batch(struct kgem *kgem)
1458813957e3Ssnj{
1459813957e3Ssnj	struct kgem_bo *last;
1460813957e3Ssnj	unsigned flags;
1461813957e3Ssnj
1462813957e3Ssnj	last = kgem->batch_bo;
1463813957e3Ssnj	if (last) {
1464813957e3Ssnj		kgem_fixup_relocs(kgem, last, 0);
1465813957e3Ssnj		kgem->batch = NULL;
1466813957e3Ssnj	}
1467813957e3Ssnj
1468813957e3Ssnj	if (kgem->batch) {
1469813957e3Ssnj		assert(last == NULL);
1470813957e3Ssnj		return NULL;
1471813957e3Ssnj	}
1472813957e3Ssnj
1473813957e3Ssnj	flags = CREATE_CPU_MAP | CREATE_NO_THROTTLE;
1474813957e3Ssnj	if (!kgem->has_llc)
1475813957e3Ssnj		flags |= CREATE_UNCACHED;
1476813957e3Ssnj
1477813957e3Ssnj	kgem->batch_bo = kgem_create_linear(kgem,
1478813957e3Ssnj					    sizeof(uint32_t)*kgem->batch_size,
1479813957e3Ssnj					    flags);
1480813957e3Ssnj	if (kgem->batch_bo)
1481813957e3Ssnj		kgem->batch = kgem_bo_map__cpu(kgem, kgem->batch_bo);
1482813957e3Ssnj	if (kgem->batch == NULL) {
1483813957e3Ssnj		DBG(("%s: unable to map batch bo, mallocing(size=%d)\n",
1484813957e3Ssnj		     __FUNCTION__,
1485813957e3Ssnj		     sizeof(uint32_t)*kgem->batch_size));
1486813957e3Ssnj		if (kgem->batch_bo) {
1487813957e3Ssnj			kgem_bo_destroy(kgem, kgem->batch_bo);
1488813957e3Ssnj			kgem->batch_bo = NULL;
1489813957e3Ssnj		}
1490813957e3Ssnj
1491813957e3Ssnj		if (posix_memalign((void **)&kgem->batch, PAGE_SIZE,
1492813957e3Ssnj				   ALIGN(sizeof(uint32_t) * kgem->batch_size, PAGE_SIZE))) {
1493813957e3Ssnj			ERR(("%s: batch allocation failed, disabling acceleration\n", __FUNCTION__));
1494813957e3Ssnj			__kgem_set_wedged(kgem);
1495813957e3Ssnj		}
1496813957e3Ssnj	} else {
1497813957e3Ssnj		DBG(("%s: allocated and mapped batch handle=%d [size=%d]\n",
1498813957e3Ssnj		     __FUNCTION__, kgem->batch_bo->handle,
1499813957e3Ssnj		     sizeof(uint32_t)*kgem->batch_size));
1500813957e3Ssnj		kgem_bo_sync__cpu(kgem, kgem->batch_bo);
1501813957e3Ssnj	}
1502813957e3Ssnj
1503813957e3Ssnj	DBG(("%s: using last batch handle=%d\n",
1504813957e3Ssnj	     __FUNCTION__, last ? last->handle : 0));
1505813957e3Ssnj	return last;
1506813957e3Ssnj}
150703b705cfSriastradh
/*
 * kgem_init: one-time initialisation of the GEM buffer manager.
 *
 * Probes the running kernel for optional features, chooses the batch
 * buffer size, measures the GTT aperture and total RAM, and derives
 * the object-size limits and aperture watermarks used by the
 * allocation policies.  On detection of unsupported hardware, a hung
 * GPU, or an unrecoverable allocation failure the device is marked
 * wedged and acceleration is disabled.
 *
 * @kgem: state block to initialise (assumed pre-zeroed by the caller
 *        -- NOTE(review): fields not assigned here are never set)
 * @fd:   DRM device file descriptor
 * @dev:  PCI device; may be NULL, in which case a 256MiB mappable
 *        aperture is assumed (clamped to the real aperture size)
 * @gen:  hardware generation, octal-encoded (e.g. 030 == gen3,
 *        040 == gen4 -- TODO confirm encoding against callers)
 */
void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen)
{
	struct drm_i915_gem_get_aperture aperture;
	size_t totalram;
	unsigned half_gpu_max;
	unsigned int i, j;

	DBG(("%s: fd=%d, gen=%d\n", __FUNCTION__, fd, gen));

	kgem->fd = fd;
	kgem->gen = gen;

	/* Empty every request/cache list before any bo can be tracked. */
	list_init(&kgem->requests[0]);
	list_init(&kgem->requests[1]);
	list_init(&kgem->batch_buffers);
	list_init(&kgem->active_buffers);
	list_init(&kgem->flushing);
	list_init(&kgem->large);
	list_init(&kgem->large_inactive);
	list_init(&kgem->snoop);
	list_init(&kgem->scanout);
	for (i = 0; i < ARRAY_SIZE(kgem->pinned_batches); i++)
		list_init(&kgem->pinned_batches[i]);
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
		list_init(&kgem->inactive[i]);
	for (i = 0; i < ARRAY_SIZE(kgem->active); i++) {
		for (j = 0; j < ARRAY_SIZE(kgem->active[i]); j++)
			list_init(&kgem->active[i][j]);
	}
	for (i = 0; i < ARRAY_SIZE(kgem->vma); i++) {
		for (j = 0; j < ARRAY_SIZE(kgem->vma[i].inactive); j++)
			list_init(&kgem->vma[i].inactive[j]);
	}
	/* Counters start negative so the vma caches may grow by
	 * MAX_*_VMA_CACHE entries before trimming -- NOTE(review):
	 * confirm against the vma-cache eviction logic. */
	kgem->vma[MAP_GTT].count = -MAX_GTT_VMA_CACHE;
	kgem->vma[MAP_CPU].count = -MAX_CPU_VMA_CACHE;

	/* Probe optional kernel/hardware features; each gem_param() or
	 * test_has_*() call interrogates the running kernel. */
	kgem->has_blt = gem_param(kgem, LOCAL_I915_PARAM_HAS_BLT) > 0;
	DBG(("%s: has BLT ring? %d\n", __FUNCTION__,
	     kgem->has_blt));

	kgem->has_relaxed_delta =
		gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_DELTA) > 0;
	DBG(("%s: has relaxed delta? %d\n", __FUNCTION__,
	     kgem->has_relaxed_delta));

	kgem->has_relaxed_fencing = test_has_relaxed_fencing(kgem);
	DBG(("%s: has relaxed fencing? %d\n", __FUNCTION__,
	     kgem->has_relaxed_fencing));

	kgem->has_llc = test_has_llc(kgem);
	DBG(("%s: has shared last-level-cache? %d\n", __FUNCTION__,
	     kgem->has_llc));

	kgem->has_wt = test_has_wt(kgem);
	DBG(("%s: has write-through caching for scanouts? %d\n", __FUNCTION__,
	     kgem->has_wt));

	kgem->has_wc_mmap = test_has_wc_mmap(kgem);
	DBG(("%s: has wc-mmapping? %d\n", __FUNCTION__,
	     kgem->has_wc_mmap));

	kgem->has_caching = test_has_caching(kgem);
	DBG(("%s: has set-cache-level? %d\n", __FUNCTION__,
	     kgem->has_caching));

	kgem->has_userptr = test_has_userptr(kgem);
	DBG(("%s: has userptr? %d\n", __FUNCTION__,
	     kgem->has_userptr));

	kgem->has_create2 = test_has_create2(kgem);
	DBG(("%s: has create2? %d\n", __FUNCTION__,
	     kgem->has_create2));

	kgem->has_no_reloc = test_has_no_reloc(kgem);
	DBG(("%s: has no-reloc? %d\n", __FUNCTION__,
	     kgem->has_no_reloc));

	kgem->has_handle_lut = test_has_handle_lut(kgem);
	DBG(("%s: has handle-lut? %d\n", __FUNCTION__,
	     kgem->has_handle_lut));

	/* Semaphores are only useful with a second (BLT) ring. */
	kgem->has_semaphores = false;
	if (kgem->has_blt && test_has_semaphores_enabled(kgem))
		kgem->has_semaphores = true;
	DBG(("%s: semaphores enabled? %d\n", __FUNCTION__,
	     kgem->has_semaphores));

	kgem->can_blt_cpu = gen >= 030;
	DBG(("%s: can blt to cpu? %d\n", __FUNCTION__,
	     kgem->can_blt_cpu));

	/* Y-tiling excluded on gen2.1 and gen4 (gen >> 3 == 4). */
	kgem->can_render_y = gen != 021 && (gen >> 3) != 4;
	DBG(("%s: can render to Y-tiled surfaces? %d\n", __FUNCTION__,
	     kgem->can_render_y));

	kgem->has_secure_batches = test_has_secure_batches(kgem);
	DBG(("%s: can use privileged batchbuffers? %d\n", __FUNCTION__,
	     kgem->has_secure_batches));

	kgem->has_pinned_batches = test_has_pinned_batches(kgem);
	DBG(("%s: can use pinned batchbuffers (to avoid CS w/a)? %d\n", __FUNCTION__,
	     kgem->has_pinned_batches));

	/* Bail out early on broken hardware or an already-hung GPU. */
	if (!is_hw_supported(kgem, dev)) {
		xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING,
			   "Detected unsupported/dysfunctional hardware, disabling acceleration.\n");
		__kgem_set_wedged(kgem);
	} else if (__kgem_throttle(kgem, false)) {
		xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING,
			   "Detected a hung GPU, disabling acceleration.\n");
		__kgem_set_wedged(kgem);
	}

	/* Batch size: largest even dword count below 64K, clamped for
	 * hardware and kernel limitations (see per-case comments). */
	kgem->batch_size = UINT16_MAX & ~7;
	if (gen == 020 && !kgem->has_pinned_batches)
		/* Limited to what we can pin */
		kgem->batch_size = 4*1024;
	if (gen == 022)
		/* 865g cannot handle a batch spanning multiple pages */
		kgem->batch_size = PAGE_SIZE / sizeof(uint32_t);
	if (gen >= 070)
		kgem->batch_size = 16*1024;
	if (!kgem->has_relaxed_delta && kgem->batch_size > 4*1024)
		kgem->batch_size = 4*1024;

	if (!kgem_init_pinned_batches(kgem) && gen == 020) {
		xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING,
			   "Unable to reserve memory for GPU, disabling acceleration.\n");
		__kgem_set_wedged(kgem);
	}

	DBG(("%s: maximum batch size? %d\n", __FUNCTION__,
	     kgem->batch_size));
	kgem_new_batch(kgem);

	/* cpu_cache_size() in bytes; >> 13 yields half that in pages. */
	kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
	DBG(("%s: last-level cache size: %d bytes, threshold in pages: %d\n",
	     __FUNCTION__, cpu_cache_size(), kgem->half_cpu_cache_pages));

	kgem->next_request = __kgem_request_alloc(kgem);

	DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, userptr? %d\n", __FUNCTION__,
	     !DBG_NO_CPU && (kgem->has_llc | kgem->has_userptr | kgem->has_caching),
	     kgem->has_llc, kgem->has_caching, kgem->has_userptr));

	/* Measure the GTT aperture; fall back to 64MiB if the ioctl
	 * reports nothing. */
	VG_CLEAR(aperture);
	aperture.aper_size = 0;
	(void)do_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
	if (aperture.aper_size == 0)
		aperture.aper_size = 64*1024*1024;

	DBG(("%s: aperture size %lld, available now %lld\n",
	     __FUNCTION__,
	     (long long)aperture.aper_size,
	     (long long)aperture.aper_available_size));

	/* High/low watermarks used to throttle aperture usage. */
	kgem->aperture_total = aperture.aper_size;
	kgem->aperture_high = aperture.aper_size * 3/4;
	kgem->aperture_low = aperture.aper_size * 1/3;
	if (gen < 033) {
		/* Severe alignment penalties */
		kgem->aperture_high /= 2;
		kgem->aperture_low /= 2;
	}
	DBG(("%s: aperture low=%d [%d], high=%d [%d]\n", __FUNCTION__,
	     kgem->aperture_low, kgem->aperture_low / (1024*1024),
	     kgem->aperture_high, kgem->aperture_high / (1024*1024)));

	/* CPU-mappable portion of the aperture; default when no PCI
	 * device info is available, clamped to the full aperture. */
	kgem->aperture_mappable = 256 * 1024 * 1024;
	if (dev != NULL)
		kgem->aperture_mappable = agp_aperture_size(dev, gen);
	if (kgem->aperture_mappable == 0 ||
	    kgem->aperture_mappable > aperture.aper_size)
		kgem->aperture_mappable = aperture.aper_size;
	DBG(("%s: aperture mappable=%d [%d MiB]\n", __FUNCTION__,
	     kgem->aperture_mappable, kgem->aperture_mappable / (1024*1024)));

	kgem->aperture_fenceable = MIN(256*1024*1024, kgem->aperture_mappable);
	DBG(("%s: aperture fenceable=%d [%d MiB]\n", __FUNCTION__,
	     kgem->aperture_fenceable, kgem->aperture_fenceable / (1024*1024)));

	/* Upload-buffer size: scales with the mappable aperture but is
	 * capped at half the CPU cache; rounded to a power of two. */
	kgem->buffer_size = 64 * 1024;
	while (kgem->buffer_size < kgem->aperture_mappable >> 10)
		kgem->buffer_size *= 2;
	if (kgem->buffer_size >> 12 > kgem->half_cpu_cache_pages)
		kgem->buffer_size = kgem->half_cpu_cache_pages << 12;
	kgem->buffer_size = 1 << __fls(kgem->buffer_size);
	DBG(("%s: buffer size=%d [%d KiB]\n", __FUNCTION__,
	     kgem->buffer_size, kgem->buffer_size / 1024));
	assert(kgem->buffer_size);

	/* Object-size ceilings, bounded by aperture, cache and RAM. */
	kgem->max_object_size = 3 * (kgem->aperture_high >> 12) << 10;
	kgem->max_gpu_size = kgem->max_object_size;
	if (!kgem->has_llc && kgem->max_gpu_size > MAX_CACHE_SIZE)
		kgem->max_gpu_size = MAX_CACHE_SIZE;

	totalram = total_ram_size();
	if (totalram == 0) {
		DBG(("%s: total ram size unknown, assuming maximum of total aperture\n",
		     __FUNCTION__));
		totalram = kgem->aperture_total;
	}
	DBG(("%s: total ram=%ld\n", __FUNCTION__, (long)totalram));
	if (kgem->max_object_size > totalram / 2)
		kgem->max_object_size = totalram / 2;
	if (kgem->max_gpu_size > totalram / 4)
		kgem->max_gpu_size = totalram / 4;

	if (kgem->aperture_high > totalram / 2) {
		kgem->aperture_high = totalram / 2;
		kgem->aperture_low = kgem->aperture_high / 4;
		DBG(("%s: reduced aperture watermaks to fit into ram; low=%d [%d], high=%d [%d]\n", __FUNCTION__,
		     kgem->aperture_low, kgem->aperture_low / (1024*1024),
		     kgem->aperture_high, kgem->aperture_high / (1024*1024)));
	}

	kgem->max_cpu_size = kgem->max_object_size;

	/* Tile-copy/upload staging limits, each clamped against the
	 * GPU size and the aperture watermarks. */
	half_gpu_max = kgem->max_gpu_size / 2;
	kgem->max_copy_tile_size = (MAX_CACHE_SIZE + 1)/2;
	if (kgem->max_copy_tile_size > half_gpu_max)
		kgem->max_copy_tile_size = half_gpu_max;

	if (kgem->has_llc)
		kgem->max_upload_tile_size = kgem->max_copy_tile_size;
	else
		kgem->max_upload_tile_size = kgem->aperture_fenceable / 4;
	if (kgem->max_upload_tile_size > half_gpu_max)
		kgem->max_upload_tile_size = half_gpu_max;
	if (kgem->max_upload_tile_size > kgem->aperture_high/2)
		kgem->max_upload_tile_size = kgem->aperture_high/2;
	if (kgem->max_upload_tile_size > kgem->aperture_low)
		kgem->max_upload_tile_size = kgem->aperture_low;
	if (kgem->max_upload_tile_size < 16*PAGE_SIZE)
		kgem->max_upload_tile_size = 16*PAGE_SIZE;

	kgem->large_object_size = MAX_CACHE_SIZE;
	if (kgem->large_object_size > half_gpu_max)
		kgem->large_object_size = half_gpu_max;
	if (kgem->max_copy_tile_size > kgem->aperture_high/2)
		kgem->max_copy_tile_size = kgem->aperture_high/2;
	if (kgem->max_copy_tile_size > kgem->aperture_low)
		kgem->max_copy_tile_size = kgem->aperture_low;
	if (kgem->max_copy_tile_size < 16*PAGE_SIZE)
		kgem->max_copy_tile_size = 16*PAGE_SIZE;

	/* CPU bos only make sense if we can get coherent mappings. */
	if (kgem->has_llc | kgem->has_caching | kgem->has_userptr) {
		if (kgem->large_object_size > kgem->max_cpu_size)
			kgem->large_object_size = kgem->max_cpu_size;
	} else
		kgem->max_cpu_size = 0;
	if (DBG_NO_CPU)
		kgem->max_cpu_size = 0;

	DBG(("%s: maximum object size=%d\n",
	     __FUNCTION__, kgem->max_object_size));
	DBG(("%s: large object thresold=%d\n",
	     __FUNCTION__, kgem->large_object_size));
	DBG(("%s: max object sizes (gpu=%d, cpu=%d, tile upload=%d, copy=%d)\n",
	     __FUNCTION__,
	     kgem->max_gpu_size, kgem->max_cpu_size,
	     kgem->max_upload_tile_size, kgem->max_copy_tile_size));

	/* Convert the aperture thresholds to pages */
	kgem->aperture_mappable /= PAGE_SIZE;
	kgem->aperture_fenceable /= PAGE_SIZE;
	kgem->aperture_low /= PAGE_SIZE;
	kgem->aperture_high /= PAGE_SIZE;
	kgem->aperture_total /= PAGE_SIZE;

	/* Reserve two fences (for scanout/overlay -- TODO confirm). */
	kgem->fence_max = gem_param(kgem, I915_PARAM_NUM_FENCES_AVAIL) - 2;
	if ((int)kgem->fence_max < 0)
		kgem->fence_max = 5; /* minimum safe value for all hw */
	DBG(("%s: max fences=%d\n", __FUNCTION__, kgem->fence_max));

	/* Base execbuffer flags applied to every submission. */
	kgem->batch_flags_base = 0;
	if (kgem->has_no_reloc)
		kgem->batch_flags_base |= LOCAL_I915_EXEC_NO_RELOC;
	if (kgem->has_handle_lut)
		kgem->batch_flags_base |= LOCAL_I915_EXEC_HANDLE_LUT;
	if (kgem->has_pinned_batches)
		kgem->batch_flags_base |= LOCAL_I915_EXEC_IS_PINNED;

	kgem_init_swizzling(kgem);
}
179303b705cfSriastradh
179403b705cfSriastradh/* XXX hopefully a good approximation */
17959a906b70Schristosstatic uint32_t kgem_get_unique_id(struct kgem *kgem)
179603b705cfSriastradh{
179703b705cfSriastradh	uint32_t id;
179803b705cfSriastradh	id = ++kgem->unique_id;
179903b705cfSriastradh	if (id == 0)
180003b705cfSriastradh		id = ++kgem->unique_id;
180103b705cfSriastradh	return id;
180203b705cfSriastradh}
180303b705cfSriastradh
180403b705cfSriastradhinline static uint32_t kgem_pitch_alignment(struct kgem *kgem, unsigned flags)
180503b705cfSriastradh{
180603b705cfSriastradh	if (flags & CREATE_PRIME)
180703b705cfSriastradh		return 256;
180803b705cfSriastradh	if (flags & CREATE_SCANOUT)
180903b705cfSriastradh		return 64;
1810813957e3Ssnj	if (kgem->gen >= 0100)
1811813957e3Ssnj		return 32;
18129a906b70Schristos	return 8;
181303b705cfSriastradh}
181403b705cfSriastradh
18159a906b70Schristosvoid kgem_get_tile_size(struct kgem *kgem, int tiling, int pitch,
181603b705cfSriastradh			int *tile_width, int *tile_height, int *tile_size)
181703b705cfSriastradh{
181803b705cfSriastradh	if (kgem->gen <= 030) {
181903b705cfSriastradh		if (tiling) {
182003b705cfSriastradh			if (kgem->gen < 030) {
182103b705cfSriastradh				*tile_width = 128;
182203b705cfSriastradh				*tile_height = 16;
182303b705cfSriastradh				*tile_size = 2048;
182403b705cfSriastradh			} else {
182503b705cfSriastradh				*tile_width = 512;
182603b705cfSriastradh				*tile_height = 8;
182703b705cfSriastradh				*tile_size = 4096;
182803b705cfSriastradh			}
182903b705cfSriastradh		} else {
183003b705cfSriastradh			*tile_width = 1;
183103b705cfSriastradh			*tile_height = 1;
183203b705cfSriastradh			*tile_size = 1;
183303b705cfSriastradh		}
183403b705cfSriastradh	} else switch (tiling) {
183503b705cfSriastradh	default:
183603b705cfSriastradh	case I915_TILING_NONE:
183703b705cfSriastradh		*tile_width = 1;
183803b705cfSriastradh		*tile_height = 1;
183903b705cfSriastradh		*tile_size = 1;
184003b705cfSriastradh		break;
184103b705cfSriastradh	case I915_TILING_X:
184203b705cfSriastradh		*tile_width = 512;
184303b705cfSriastradh		*tile_height = 8;
184403b705cfSriastradh		*tile_size = 4096;
184503b705cfSriastradh		break;
184603b705cfSriastradh	case I915_TILING_Y:
184703b705cfSriastradh		*tile_width = 128;
184803b705cfSriastradh		*tile_height = 32;
184903b705cfSriastradh		*tile_size = 4096;
185003b705cfSriastradh		break;
185103b705cfSriastradh	}
18529a906b70Schristos
18539a906b70Schristos	/* Force offset alignment to tile-row */
18549a906b70Schristos	if (tiling && kgem->gen < 033)
18559a906b70Schristos		*tile_width = pitch;
185603b705cfSriastradh}
185703b705cfSriastradh
/*
 * Compute the allocation size in bytes for a 2D surface, returning the
 * aligned pitch through @pitch.
 *
 * Returns 0 if the surface is too wide for the blitter on pre-gen4
 * hardware.  On pre-gen4 without relaxed fencing, tiled surfaces are
 * rounded up to the next power-of-two fence region and tiled pitches
 * are forced to a power of two.
 */
static uint32_t kgem_surface_size(struct kgem *kgem,
				  bool relaxed_fencing,
				  unsigned flags,
				  uint32_t width,
				  uint32_t height,
				  uint32_t bpp,
				  uint32_t tiling,
				  uint32_t *pitch)
{
	uint32_t tile_width, tile_height;
	uint32_t size;

	assert(width <= MAXSHORT);
	assert(height <= MAXSHORT);
	assert(bpp >= 8);

	if (kgem->gen <= 030) {
		if (tiling) {
			if (kgem->gen < 030) {
				tile_width = 128;
				tile_height = 16;
			} else {
				tile_width = 512;
				tile_height = 8;
			}
		} else {
			/* Linear: align pitch to two pixels' worth of
			 * bytes and the generation's base alignment. */
			tile_width = 2 * bpp >> 3;
			tile_width = ALIGN(tile_width,
					   kgem_pitch_alignment(kgem, flags));
			tile_height = 1;
		}
	} else switch (tiling) {
	default:
	case I915_TILING_NONE:
		tile_width = 2 * bpp >> 3;
		tile_width = ALIGN(tile_width,
				   kgem_pitch_alignment(kgem, flags));
		tile_height = 1;
		break;

	case I915_TILING_X:
		tile_width = 512;
		tile_height = 8;
		break;
	case I915_TILING_Y:
		tile_width = 128;
		tile_height = 32;
		break;
	}
	/* XXX align to an even tile row */
	if (!kgem->has_relaxed_fencing)
		tile_height *= 2;

	*pitch = ALIGN(width * bpp / 8, tile_width);
	height = ALIGN(height, tile_height);
	DBG(("%s: tile_width=%d, tile_height=%d => aligned pitch=%d, height=%d\n",
	     __FUNCTION__, tile_width, tile_height, *pitch, height));

	/* gen4+ imposes no fence-size constraint: page alignment only. */
	if (kgem->gen >= 040)
		return PAGE_ALIGN(*pitch * height);

	/* If it is too wide for the blitter, don't even bother.  */
	if (tiling != I915_TILING_NONE) {
		if (*pitch > 8192) {
			DBG(("%s: too wide for tiled surface (pitch=%d, limit=%d)\n",
			     __FUNCTION__, *pitch, 8192));
			return 0;
		}

		/* Tiled fences need a power-of-two pitch on pre-gen4. */
		for (size = tile_width; size < *pitch; size <<= 1)
			;
		*pitch = size;
	} else {
		if (*pitch >= 32768) {
			DBG(("%s: too wide for linear surface (pitch=%d, limit=%d)\n",
			     __FUNCTION__, *pitch, 32767));
			return 0;
		}
	}

	size = *pitch * height;
	if (relaxed_fencing || tiling == I915_TILING_NONE)
		return PAGE_ALIGN(size);

	/* We need to allocate a pot fence region for a tiled buffer. */
	if (kgem->gen < 030)
		tile_width = 512 * 1024;
	else
		tile_width = 1024 * 1024;
	while (tile_width < size)
		tile_width *= 2;
	return tile_width;
}
195103b705cfSriastradh
19529a906b70Schristosbool kgem_check_surface_size(struct kgem *kgem,
19539a906b70Schristos			     uint32_t width,
19549a906b70Schristos			     uint32_t height,
19559a906b70Schristos			     uint32_t bpp,
19569a906b70Schristos			     uint32_t tiling,
19579a906b70Schristos			     uint32_t pitch,
19589a906b70Schristos			     uint32_t size)
19599a906b70Schristos{
19609a906b70Schristos	uint32_t min_size, min_pitch;
19619a906b70Schristos	int tile_width, tile_height, tile_size;
19629a906b70Schristos
19639a906b70Schristos	DBG(("%s(width=%d, height=%d, bpp=%d, tiling=%d, pitch=%d, size=%d)\n",
19649a906b70Schristos	     __FUNCTION__, width, height, bpp, tiling, pitch, size));
19659a906b70Schristos
19669a906b70Schristos	if (pitch & 3)
19679a906b70Schristos		return false;
19689a906b70Schristos
19699a906b70Schristos	min_size = kgem_surface_size(kgem, kgem->has_relaxed_fencing, 0,
19709a906b70Schristos				     width, height, bpp, tiling,
19719a906b70Schristos				     &min_pitch);
19729a906b70Schristos
19739a906b70Schristos	DBG(("%s: min_pitch=%d, min_size=%d\n", __FUNCTION__, min_pitch, min_size));
19749a906b70Schristos
19759a906b70Schristos	if (size < min_size)
19769a906b70Schristos		return false;
19779a906b70Schristos
19789a906b70Schristos	if (pitch < min_pitch)
19799a906b70Schristos		return false;
19809a906b70Schristos
19819a906b70Schristos	kgem_get_tile_size(kgem, tiling, min_pitch,
19829a906b70Schristos			   &tile_width, &tile_height, &tile_size);
19839a906b70Schristos
19849a906b70Schristos	DBG(("%s: tile_width=%d, tile_size=%d\n", __FUNCTION__, tile_width, tile_size));
19859a906b70Schristos	if (pitch & (tile_width - 1))
19869a906b70Schristos		return false;
19879a906b70Schristos	if (size & (tile_size - 1))
19889a906b70Schristos		return false;
19899a906b70Schristos
19909a906b70Schristos	return true;
19919a906b70Schristos}
19929a906b70Schristos
199303b705cfSriastradhstatic uint32_t kgem_aligned_height(struct kgem *kgem,
199403b705cfSriastradh				    uint32_t height, uint32_t tiling)
199503b705cfSriastradh{
199603b705cfSriastradh	uint32_t tile_height;
199703b705cfSriastradh
199803b705cfSriastradh	if (kgem->gen <= 030) {
19999a906b70Schristos		tile_height = tiling ? kgem->gen < 030 ? 16 : 8 : 1;
200003b705cfSriastradh	} else switch (tiling) {
200103b705cfSriastradh		/* XXX align to an even tile row */
200203b705cfSriastradh	default:
200303b705cfSriastradh	case I915_TILING_NONE:
200403b705cfSriastradh		tile_height = 1;
200503b705cfSriastradh		break;
200603b705cfSriastradh	case I915_TILING_X:
20079a906b70Schristos		tile_height = 8;
200803b705cfSriastradh		break;
200903b705cfSriastradh	case I915_TILING_Y:
20109a906b70Schristos		tile_height = 32;
201103b705cfSriastradh		break;
201203b705cfSriastradh	}
201303b705cfSriastradh
20149a906b70Schristos	/* XXX align to an even tile row */
20159a906b70Schristos	if (!kgem->has_relaxed_fencing)
20169a906b70Schristos		tile_height *= 2;
20179a906b70Schristos
201803b705cfSriastradh	return ALIGN(height, tile_height);
201903b705cfSriastradh}
202003b705cfSriastradh
202103b705cfSriastradhstatic struct drm_i915_gem_exec_object2 *
202203b705cfSriastradhkgem_add_handle(struct kgem *kgem, struct kgem_bo *bo)
202303b705cfSriastradh{
202403b705cfSriastradh	struct drm_i915_gem_exec_object2 *exec;
202503b705cfSriastradh
202603b705cfSriastradh	DBG(("%s: handle=%d, index=%d\n",
202703b705cfSriastradh	     __FUNCTION__, bo->handle, kgem->nexec));
202803b705cfSriastradh
202903b705cfSriastradh	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
203003b705cfSriastradh	bo->target_handle = kgem->has_handle_lut ? kgem->nexec : bo->handle;
203103b705cfSriastradh	exec = memset(&kgem->exec[kgem->nexec++], 0, sizeof(*exec));
203203b705cfSriastradh	exec->handle = bo->handle;
203303b705cfSriastradh	exec->offset = bo->presumed_offset;
203403b705cfSriastradh
203503b705cfSriastradh	kgem->aperture += num_pages(bo);
203603b705cfSriastradh
203703b705cfSriastradh	return exec;
203803b705cfSriastradh}
203903b705cfSriastradh
/*
 * Attach @bo to the batch under construction: allocate its exec slot,
 * bind it to the next request (tagged with the current ring) and move
 * it onto that request's buffer list for retirement tracking.
 */
static void kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt);
	assert(bo->proxy == NULL);

	bo->exec = kgem_add_handle(kgem, bo);
	bo->rq = MAKE_REQUEST(kgem->next_request, kgem->ring);

	list_move_tail(&bo->request, &kgem->next_request->buffers);
	/* io (upload) buffers in use by this batch live on batch_buffers */
	if (bo->io && !list_is_empty(&bo->list))
		list_move(&bo->list, &kgem->batch_buffers);

	/* XXX is it worth working around gcc here? */
	kgem->flush |= bo->flush;
}
205503b705cfSriastradh
205603b705cfSriastradhstatic uint32_t kgem_end_batch(struct kgem *kgem)
205703b705cfSriastradh{
205803b705cfSriastradh	kgem->batch[kgem->nbatch++] = MI_BATCH_BUFFER_END;
205903b705cfSriastradh	if (kgem->nbatch & 1)
206003b705cfSriastradh		kgem->batch[kgem->nbatch++] = MI_NOOP;
206103b705cfSriastradh
206203b705cfSriastradh	return kgem->nbatch;
206303b705cfSriastradh}
206403b705cfSriastradh
206503b705cfSriastradhstatic void kgem_bo_binding_free(struct kgem *kgem, struct kgem_bo *bo)
206603b705cfSriastradh{
206703b705cfSriastradh	struct kgem_bo_binding *b;
206803b705cfSriastradh
206903b705cfSriastradh	b = bo->binding.next;
207003b705cfSriastradh	while (b) {
207103b705cfSriastradh		struct kgem_bo_binding *next = b->next;
20729a906b70Schristos		free(b);
207303b705cfSriastradh		b = next;
207403b705cfSriastradh	}
207503b705cfSriastradh}
207603b705cfSriastradh
/*
 * Remove the DRM framebuffer attached to a scanout bo, if any.
 * For scanout buffers bo->delta holds the fb id; it is cleared after
 * the RMFB ioctl regardless of its result.
 */
static void kgem_bo_rmfb(struct kgem *kgem, struct kgem_bo *bo)
{
	if (bo->scanout && bo->delta) {
		DBG(("%s: releasing fb=%d for handle=%d\n",
		     __FUNCTION__, bo->delta, bo->handle));
		/* XXX will leak if we are not DRM_MASTER. *shrug* */
		do_ioctl(kgem->fd, DRM_IOCTL_MODE_RMFB, &bo->delta);
		bo->delta = 0;
	}
}
208703b705cfSriastradh
/*
 * Final destruction of a bo: release its framebuffer and binding
 * records, tear down all mappings (GTT, WC and CPU), close the GEM
 * handle, and recycle the struct.  The bo must be idle, unreferenced
 * and not part of an execbuffer.
 */
static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, size=%d\n", __FUNCTION__, bo->handle, bytes(bo)));
	assert(bo->refcnt == 0);
	assert(bo->proxy == NULL);
	assert(bo->exec == NULL);
	assert(!bo->snoop || bo->rq == NULL);

#ifdef DEBUG_MEMORY
	kgem->debug_memory.bo_allocs--;
	kgem->debug_memory.bo_bytes -= bytes(bo);
#endif

	kgem_bo_binding_free(kgem, bo);
	kgem_bo_rmfb(kgem, bo);

	/* For userptr bos we own the snooped base allocation unless it
	 * belongs to an io/flush buffer managed elsewhere. */
	if (IS_USER_MAP(bo->map__cpu)) {
		assert(bo->rq == NULL);
		assert(!__kgem_busy(kgem, bo->handle));
		assert(MAP(bo->map__cpu) != bo || bo->io || bo->flush);
		if (!(bo->io || bo->flush)) {
			DBG(("%s: freeing snooped base\n", __FUNCTION__));
			assert(bo != MAP(bo->map__cpu));
			free(MAP(bo->map__cpu));
		}
		bo->map__cpu = NULL;
	}

	DBG(("%s: releasing %p:%p vma for handle=%d, count=%d\n",
	     __FUNCTION__, bo->map__gtt, bo->map__cpu,
	     bo->handle, list_is_empty(&bo->vma) ? 0 : kgem->vma[bo->map__gtt == NULL && bo->map__wc == NULL].count));

	/* Drop the bo from the vma cache accounting; the index selects
	 * the CPU cache (1) when no GTT/WC mapping exists, else the
	 * GTT/WC cache (0). */
	if (!list_is_empty(&bo->vma)) {
		_list_del(&bo->vma);
		kgem->vma[bo->map__gtt == NULL && bo->map__wc == NULL].count--;
	}

	/* Unmap every outstanding mapping flavour. */
	if (bo->map__gtt)
		munmap(bo->map__gtt, bytes(bo));
	if (bo->map__wc) {
		VG(VALGRIND_MAKE_MEM_NOACCESS(bo->map__wc, bytes(bo)));
		munmap(bo->map__wc, bytes(bo));
	}
	if (bo->map__cpu) {
		VG(VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map__cpu), bytes(bo)));
		munmap(MAP(bo->map__cpu), bytes(bo));
	}

	_list_del(&bo->list);
	_list_del(&bo->request);
	gem_close(kgem->fd, bo->handle);

	/* Recycle the struct via a single-linked free list (the first
	 * pointer-sized slot of the dead bo is reused as the link). */
	if (!bo->io && !DBG_NO_MALLOC_CACHE) {
		*(struct kgem_bo **)bo = __kgem_freed_bo;
		__kgem_freed_bo = bo;
	} else
		free(bo);
}
214603b705cfSriastradh
214703b705cfSriastradhinline static void kgem_bo_move_to_inactive(struct kgem *kgem,
214803b705cfSriastradh					    struct kgem_bo *bo)
214903b705cfSriastradh{
215003b705cfSriastradh	DBG(("%s: moving handle=%d to inactive\n", __FUNCTION__, bo->handle));
215103b705cfSriastradh
215203b705cfSriastradh	assert(bo->refcnt == 0);
215303b705cfSriastradh	assert(bo->reusable);
215403b705cfSriastradh	assert(bo->rq == NULL);
215503b705cfSriastradh	assert(bo->exec == NULL);
215603b705cfSriastradh	assert(bo->domain != DOMAIN_GPU);
215703b705cfSriastradh	assert(!bo->proxy);
215803b705cfSriastradh	assert(!bo->io);
215903b705cfSriastradh	assert(!bo->scanout);
216003b705cfSriastradh	assert(!bo->snoop);
216103b705cfSriastradh	assert(!bo->flush);
216203b705cfSriastradh	assert(!bo->needs_flush);
216303b705cfSriastradh	assert(list_is_empty(&bo->vma));
216403b705cfSriastradh	assert_tiling(kgem, bo);
21659a906b70Schristos	assert_cacheing(kgem, bo);
216603b705cfSriastradh	ASSERT_IDLE(kgem, bo->handle);
216703b705cfSriastradh
216803b705cfSriastradh	if (bucket(bo) >= NUM_CACHE_BUCKETS) {
21699a906b70Schristos		if (bo->map__gtt) {
2170813957e3Ssnj			munmap(bo->map__gtt, bytes(bo));
21719a906b70Schristos			bo->map__gtt = NULL;
21729a906b70Schristos		}
217303b705cfSriastradh
21749a906b70Schristos		list_move(&bo->list, &kgem->large_inactive);
21759a906b70Schristos	} else {
21769a906b70Schristos		assert(bo->flush == false);
2177813957e3Ssnj		assert(list_is_empty(&bo->vma));
21789a906b70Schristos		list_move(&bo->list, &kgem->inactive[bucket(bo)]);
2179813957e3Ssnj		if (bo->map__gtt && !kgem_bo_can_map(kgem, bo)) {
2180813957e3Ssnj			munmap(bo->map__gtt, bytes(bo));
2181813957e3Ssnj			bo->map__gtt = NULL;
218203b705cfSriastradh		}
2183813957e3Ssnj		if (bo->map__gtt || (bo->map__wc && !bo->tiling)) {
2184813957e3Ssnj			list_add(&bo->vma, &kgem->vma[0].inactive[bucket(bo)]);
2185813957e3Ssnj			kgem->vma[0].count++;
2186813957e3Ssnj		}
2187813957e3Ssnj		if (bo->map__cpu && list_is_empty(&bo->vma)) {
21889a906b70Schristos			list_add(&bo->vma, &kgem->vma[1].inactive[bucket(bo)]);
21899a906b70Schristos			kgem->vma[1].count++;
219003b705cfSriastradh		}
219103b705cfSriastradh	}
21929a906b70Schristos
21939a906b70Schristos	kgem->need_expire = true;
219403b705cfSriastradh}
219503b705cfSriastradh
219603b705cfSriastradhstatic struct kgem_bo *kgem_bo_replace_io(struct kgem_bo *bo)
219703b705cfSriastradh{
219803b705cfSriastradh	struct kgem_bo *base;
219903b705cfSriastradh
220003b705cfSriastradh	if (!bo->io)
220103b705cfSriastradh		return bo;
220203b705cfSriastradh
220303b705cfSriastradh	assert(!bo->snoop);
22049a906b70Schristos	if (__kgem_freed_bo) {
22059a906b70Schristos		base = __kgem_freed_bo;
22069a906b70Schristos		__kgem_freed_bo = *(struct kgem_bo **)base;
22079a906b70Schristos	} else
22089a906b70Schristos		base = malloc(sizeof(*base));
220903b705cfSriastradh	if (base) {
221003b705cfSriastradh		DBG(("%s: transferring io handle=%d to bo\n",
221103b705cfSriastradh		     __FUNCTION__, bo->handle));
221203b705cfSriastradh		/* transfer the handle to a minimum bo */
221303b705cfSriastradh		memcpy(base, bo, sizeof(*base));
221403b705cfSriastradh		base->io = false;
221503b705cfSriastradh		list_init(&base->list);
221603b705cfSriastradh		list_replace(&bo->request, &base->request);
221703b705cfSriastradh		list_replace(&bo->vma, &base->vma);
221803b705cfSriastradh		free(bo);
221903b705cfSriastradh		bo = base;
222003b705cfSriastradh	} else
222103b705cfSriastradh		bo->reusable = false;
222203b705cfSriastradh
222303b705cfSriastradh	return bo;
222403b705cfSriastradh}
222503b705cfSriastradh
222603b705cfSriastradhinline static void kgem_bo_remove_from_inactive(struct kgem *kgem,
222703b705cfSriastradh						struct kgem_bo *bo)
222803b705cfSriastradh{
222903b705cfSriastradh	DBG(("%s: removing handle=%d from inactive\n", __FUNCTION__, bo->handle));
223003b705cfSriastradh
223103b705cfSriastradh	list_del(&bo->list);
223203b705cfSriastradh	assert(bo->rq == NULL);
223303b705cfSriastradh	assert(bo->exec == NULL);
22349a906b70Schristos	if (!list_is_empty(&bo->vma)) {
2235813957e3Ssnj		assert(bo->map__gtt || bo->map__wc || bo->map__cpu);
223603b705cfSriastradh		list_del(&bo->vma);
2237813957e3Ssnj		kgem->vma[bo->map__gtt == NULL && bo->map__wc == NULL].count--;
223803b705cfSriastradh	}
223903b705cfSriastradh}
224003b705cfSriastradh
224103b705cfSriastradhinline static void kgem_bo_remove_from_active(struct kgem *kgem,
224203b705cfSriastradh					      struct kgem_bo *bo)
224303b705cfSriastradh{
224403b705cfSriastradh	DBG(("%s: removing handle=%d from active\n", __FUNCTION__, bo->handle));
224503b705cfSriastradh
224603b705cfSriastradh	list_del(&bo->list);
224703b705cfSriastradh	assert(bo->rq != NULL);
22489a906b70Schristos	if (RQ(bo->rq) == (void *)kgem) {
22499a906b70Schristos		assert(bo->exec == NULL);
225003b705cfSriastradh		list_del(&bo->request);
22519a906b70Schristos	}
225203b705cfSriastradh	assert(list_is_empty(&bo->vma));
225303b705cfSriastradh}
225403b705cfSriastradh
225503b705cfSriastradhstatic void _kgem_bo_delete_buffer(struct kgem *kgem, struct kgem_bo *bo)
225603b705cfSriastradh{
225703b705cfSriastradh	struct kgem_buffer *io = (struct kgem_buffer *)bo->proxy;
225803b705cfSriastradh
225903b705cfSriastradh	DBG(("%s: size=%d, offset=%d, parent used=%d\n",
226003b705cfSriastradh	     __FUNCTION__, bo->size.bytes, bo->delta, io->used));
226103b705cfSriastradh
226203b705cfSriastradh	if (ALIGN(bo->delta + bo->size.bytes, UPLOAD_ALIGNMENT) == io->used)
226303b705cfSriastradh		io->used = bo->delta;
226403b705cfSriastradh}
226503b705cfSriastradh
226603b705cfSriastradhstatic bool check_scanout_size(struct kgem *kgem,
226703b705cfSriastradh			       struct kgem_bo *bo,
226803b705cfSriastradh			       int width, int height)
226903b705cfSriastradh{
227003b705cfSriastradh	struct drm_mode_fb_cmd info;
227103b705cfSriastradh
227203b705cfSriastradh	assert(bo->scanout);
227303b705cfSriastradh
227403b705cfSriastradh	VG_CLEAR(info);
227503b705cfSriastradh	info.fb_id = bo->delta;
227603b705cfSriastradh
22779a906b70Schristos	if (do_ioctl(kgem->fd, DRM_IOCTL_MODE_GETFB, &info))
227803b705cfSriastradh		return false;
227903b705cfSriastradh
228003b705cfSriastradh	gem_close(kgem->fd, info.handle);
228103b705cfSriastradh
228203b705cfSriastradh	if (width != info.width || height != info.height) {
228303b705cfSriastradh		DBG(("%s: not using scanout %d (%dx%d), want (%dx%d)\n",
228403b705cfSriastradh		     __FUNCTION__,
228503b705cfSriastradh		     info.fb_id, info.width, info.height,
228603b705cfSriastradh		     width, height));
228703b705cfSriastradh		return false;
228803b705cfSriastradh	}
228903b705cfSriastradh
229003b705cfSriastradh	return true;
229103b705cfSriastradh}
229203b705cfSriastradh
229303b705cfSriastradhstatic void kgem_bo_move_to_scanout(struct kgem *kgem, struct kgem_bo *bo)
229403b705cfSriastradh{
229503b705cfSriastradh	assert(bo->refcnt == 0);
229603b705cfSriastradh	assert(bo->scanout);
229703b705cfSriastradh	assert(!bo->flush);
229803b705cfSriastradh	assert(!bo->snoop);
229903b705cfSriastradh	assert(!bo->io);
230003b705cfSriastradh
23019a906b70Schristos	if (bo->purged) { /* for stolen fb */
23029a906b70Schristos		if (!bo->exec) {
23039a906b70Schristos			DBG(("%s: discarding purged scanout - stolen?\n",
23049a906b70Schristos			     __FUNCTION__));
23059a906b70Schristos			kgem_bo_free(kgem, bo);
23069a906b70Schristos		}
230703b705cfSriastradh		return;
230803b705cfSriastradh	}
230903b705cfSriastradh
231003b705cfSriastradh	DBG(("%s: moving %d [fb %d] to scanout cache, active? %d\n",
231103b705cfSriastradh	     __FUNCTION__, bo->handle, bo->delta, bo->rq != NULL));
231203b705cfSriastradh	if (bo->rq)
231303b705cfSriastradh		list_move_tail(&bo->list, &kgem->scanout);
231403b705cfSriastradh	else
231503b705cfSriastradh		list_move(&bo->list, &kgem->scanout);
23169a906b70Schristos
23179a906b70Schristos	kgem->need_expire = true;
23189a906b70Schristos
231903b705cfSriastradh}
232003b705cfSriastradh
232103b705cfSriastradhstatic void kgem_bo_move_to_snoop(struct kgem *kgem, struct kgem_bo *bo)
232203b705cfSriastradh{
232303b705cfSriastradh	assert(bo->reusable);
23249a906b70Schristos	assert(!bo->scanout);
232503b705cfSriastradh	assert(!bo->flush);
232603b705cfSriastradh	assert(!bo->needs_flush);
232703b705cfSriastradh	assert(bo->refcnt == 0);
232803b705cfSriastradh	assert(bo->exec == NULL);
232903b705cfSriastradh
23309a906b70Schristos	if (DBG_NO_SNOOP_CACHE) {
23319a906b70Schristos		kgem_bo_free(kgem, bo);
23329a906b70Schristos		return;
23339a906b70Schristos	}
23349a906b70Schristos
233503b705cfSriastradh	if (num_pages(bo) > kgem->max_cpu_size >> 13) {
233603b705cfSriastradh		DBG(("%s handle=%d discarding large CPU buffer (%d >%d pages)\n",
233703b705cfSriastradh		     __FUNCTION__, bo->handle, num_pages(bo), kgem->max_cpu_size >> 13));
233803b705cfSriastradh		kgem_bo_free(kgem, bo);
233903b705cfSriastradh		return;
234003b705cfSriastradh	}
234103b705cfSriastradh
234203b705cfSriastradh	assert(bo->tiling == I915_TILING_NONE);
234303b705cfSriastradh	assert(bo->rq == NULL);
234403b705cfSriastradh
234503b705cfSriastradh	DBG(("%s: moving %d to snoop cachee\n", __FUNCTION__, bo->handle));
234603b705cfSriastradh	list_add(&bo->list, &kgem->snoop);
23479a906b70Schristos	kgem->need_expire = true;
23489a906b70Schristos}
23499a906b70Schristos
23509a906b70Schristosstatic bool kgem_bo_move_to_cache(struct kgem *kgem, struct kgem_bo *bo)
23519a906b70Schristos{
23529a906b70Schristos	bool retired = false;
23539a906b70Schristos
23549a906b70Schristos	DBG(("%s: release handle=%d\n", __FUNCTION__, bo->handle));
23559a906b70Schristos
23569a906b70Schristos	if (bo->prime) {
23579a906b70Schristos		DBG(("%s: discarding imported prime handle=%d\n",
23589a906b70Schristos		     __FUNCTION__, bo->handle));
23599a906b70Schristos		kgem_bo_free(kgem, bo);
23609a906b70Schristos	} else if (bo->snoop) {
23619a906b70Schristos		kgem_bo_move_to_snoop(kgem, bo);
23629a906b70Schristos	} else if (bo->scanout) {
23639a906b70Schristos		kgem_bo_move_to_scanout(kgem, bo);
23649a906b70Schristos	} else if ((bo = kgem_bo_replace_io(bo))->reusable &&
23659a906b70Schristos		   kgem_bo_set_purgeable(kgem, bo)) {
23669a906b70Schristos		kgem_bo_move_to_inactive(kgem, bo);
23679a906b70Schristos		retired = true;
23689a906b70Schristos	} else
23699a906b70Schristos		kgem_bo_free(kgem, bo);
23709a906b70Schristos
23719a906b70Schristos	return retired;
237203b705cfSriastradh}
237303b705cfSriastradh
237403b705cfSriastradhstatic struct kgem_bo *
237503b705cfSriastradhsearch_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
237603b705cfSriastradh{
237703b705cfSriastradh	struct kgem_bo *bo, *first = NULL;
237803b705cfSriastradh
237903b705cfSriastradh	DBG(("%s: num_pages=%d, flags=%x\n", __FUNCTION__, num_pages, flags));
238003b705cfSriastradh
238103b705cfSriastradh	if ((kgem->has_caching | kgem->has_userptr) == 0)
238203b705cfSriastradh		return NULL;
238303b705cfSriastradh
238403b705cfSriastradh	if (list_is_empty(&kgem->snoop)) {
238503b705cfSriastradh		DBG(("%s: inactive and cache empty\n", __FUNCTION__));
238603b705cfSriastradh		if (!__kgem_throttle_retire(kgem, flags)) {
238703b705cfSriastradh			DBG(("%s: nothing retired\n", __FUNCTION__));
238803b705cfSriastradh			return NULL;
238903b705cfSriastradh		}
239003b705cfSriastradh	}
239103b705cfSriastradh
239203b705cfSriastradh	list_for_each_entry(bo, &kgem->snoop, list) {
239303b705cfSriastradh		assert(bo->refcnt == 0);
239403b705cfSriastradh		assert(bo->snoop);
239503b705cfSriastradh		assert(!bo->scanout);
239603b705cfSriastradh		assert(!bo->purged);
239703b705cfSriastradh		assert(bo->proxy == NULL);
239803b705cfSriastradh		assert(bo->tiling == I915_TILING_NONE);
239903b705cfSriastradh		assert(bo->rq == NULL);
240003b705cfSriastradh		assert(bo->exec == NULL);
240103b705cfSriastradh
240203b705cfSriastradh		if (num_pages > num_pages(bo))
240303b705cfSriastradh			continue;
240403b705cfSriastradh
240503b705cfSriastradh		if (num_pages(bo) > 2*num_pages) {
240603b705cfSriastradh			if (first == NULL)
240703b705cfSriastradh				first = bo;
240803b705cfSriastradh			continue;
240903b705cfSriastradh		}
241003b705cfSriastradh
241103b705cfSriastradh		list_del(&bo->list);
241203b705cfSriastradh		bo->pitch = 0;
241303b705cfSriastradh		bo->delta = 0;
241403b705cfSriastradh
241503b705cfSriastradh		DBG(("  %s: found handle=%d (num_pages=%d) in snoop cache\n",
241603b705cfSriastradh		     __FUNCTION__, bo->handle, num_pages(bo)));
241703b705cfSriastradh		return bo;
241803b705cfSriastradh	}
241903b705cfSriastradh
242003b705cfSriastradh	if (first) {
242103b705cfSriastradh		list_del(&first->list);
242203b705cfSriastradh		first->pitch = 0;
242303b705cfSriastradh		first->delta = 0;
242403b705cfSriastradh
242503b705cfSriastradh		DBG(("  %s: found handle=%d (num_pages=%d) in snoop cache\n",
242603b705cfSriastradh		     __FUNCTION__, first->handle, num_pages(first)));
242703b705cfSriastradh		return first;
242803b705cfSriastradh	}
242903b705cfSriastradh
243003b705cfSriastradh	return NULL;
243103b705cfSriastradh}
243203b705cfSriastradh
243303b705cfSriastradhvoid kgem_bo_undo(struct kgem *kgem, struct kgem_bo *bo)
243403b705cfSriastradh{
243503b705cfSriastradh	if (kgem->nexec != 1 || bo->exec == NULL)
243603b705cfSriastradh		return;
243703b705cfSriastradh
24389a906b70Schristos	assert(bo);
243903b705cfSriastradh	DBG(("%s: only handle in batch, discarding last operations for handle=%d\n",
244003b705cfSriastradh	     __FUNCTION__, bo->handle));
244103b705cfSriastradh
244203b705cfSriastradh	assert(bo->exec == &kgem->exec[0]);
244303b705cfSriastradh	assert(kgem->exec[0].handle == bo->handle);
244403b705cfSriastradh	assert(RQ(bo->rq) == kgem->next_request);
244503b705cfSriastradh
244603b705cfSriastradh	bo->refcnt++;
244703b705cfSriastradh	kgem_reset(kgem);
244803b705cfSriastradh	bo->refcnt--;
24499a906b70Schristos
24509a906b70Schristos	assert(kgem->nreloc == 0);
24519a906b70Schristos	assert(kgem->nexec == 0);
24529a906b70Schristos	assert(bo->exec == NULL);
245303b705cfSriastradh}
245403b705cfSriastradh
24559a906b70Schristosvoid kgem_bo_pair_undo(struct kgem *kgem, struct kgem_bo *a, struct kgem_bo *b)
245603b705cfSriastradh{
24579a906b70Schristos	if (kgem->nexec > 2)
24589a906b70Schristos		return;
245903b705cfSriastradh
24609a906b70Schristos	if (kgem->nexec == 1) {
24619a906b70Schristos		if (a)
24629a906b70Schristos			kgem_bo_undo(kgem, a);
24639a906b70Schristos		if (b)
24649a906b70Schristos			kgem_bo_undo(kgem, b);
24659a906b70Schristos		return;
24669a906b70Schristos	}
24679a906b70Schristos
24689a906b70Schristos	if (a == NULL || b == NULL)
24699a906b70Schristos		return;
24709a906b70Schristos	if (a->exec == NULL || b->exec == NULL)
24719a906b70Schristos		return;
24729a906b70Schristos
24739a906b70Schristos	DBG(("%s: only handles in batch, discarding last operations for handle=%d and handle=%d\n",
24749a906b70Schristos	     __FUNCTION__, a->handle, b->handle));
24759a906b70Schristos
24769a906b70Schristos	assert(a->exec == &kgem->exec[0] || a->exec == &kgem->exec[1]);
24779a906b70Schristos	assert(a->handle == kgem->exec[0].handle || a->handle == kgem->exec[1].handle);
24789a906b70Schristos	assert(RQ(a->rq) == kgem->next_request);
24799a906b70Schristos	assert(b->exec == &kgem->exec[0] || b->exec == &kgem->exec[1]);
24809a906b70Schristos	assert(b->handle == kgem->exec[0].handle || b->handle == kgem->exec[1].handle);
24819a906b70Schristos	assert(RQ(b->rq) == kgem->next_request);
24829a906b70Schristos
24839a906b70Schristos	a->refcnt++;
24849a906b70Schristos	b->refcnt++;
24859a906b70Schristos	kgem_reset(kgem);
24869a906b70Schristos	b->refcnt--;
24879a906b70Schristos	a->refcnt--;
24889a906b70Schristos
24899a906b70Schristos	assert(kgem->nreloc == 0);
24909a906b70Schristos	assert(kgem->nexec == 0);
24919a906b70Schristos	assert(a->exec == NULL);
24929a906b70Schristos	assert(b->exec == NULL);
24939a906b70Schristos}
24949a906b70Schristos
24959a906b70Schristosstatic void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
24969a906b70Schristos{
24979a906b70Schristos	DBG(("%s: handle=%d, size=%d\n", __FUNCTION__, bo->handle, bytes(bo)));
24989a906b70Schristos
24999a906b70Schristos	assert(list_is_empty(&bo->list));
25009a906b70Schristos	assert(bo->refcnt == 0);
25019a906b70Schristos	assert(bo->proxy == NULL);
25029a906b70Schristos	assert(bo->active_scanout == 0);
25039a906b70Schristos	assert_tiling(kgem, bo);
25049a906b70Schristos
25059a906b70Schristos	bo->binding.offset = 0;
250603b705cfSriastradh
250703b705cfSriastradh	if (DBG_NO_CACHE)
250803b705cfSriastradh		goto destroy;
250903b705cfSriastradh
25109a906b70Schristos	if (bo->prime)
25119a906b70Schristos		goto destroy;
25129a906b70Schristos
251303b705cfSriastradh	if (bo->snoop && !bo->flush) {
251403b705cfSriastradh		DBG(("%s: handle=%d is snooped\n", __FUNCTION__, bo->handle));
251503b705cfSriastradh		assert(bo->reusable);
251603b705cfSriastradh		assert(list_is_empty(&bo->list));
251703b705cfSriastradh		if (bo->exec == NULL && bo->rq && !__kgem_busy(kgem, bo->handle))
251803b705cfSriastradh			__kgem_bo_clear_busy(bo);
251903b705cfSriastradh		if (bo->rq == NULL)
252003b705cfSriastradh			kgem_bo_move_to_snoop(kgem, bo);
252103b705cfSriastradh		return;
252203b705cfSriastradh	}
25239a906b70Schristos	if (!IS_USER_MAP(bo->map__cpu))
252403b705cfSriastradh		bo->flush = false;
252503b705cfSriastradh
252603b705cfSriastradh	if (bo->scanout) {
252703b705cfSriastradh		kgem_bo_move_to_scanout(kgem, bo);
252803b705cfSriastradh		return;
252903b705cfSriastradh	}
253003b705cfSriastradh
253103b705cfSriastradh	if (bo->io)
253203b705cfSriastradh		bo = kgem_bo_replace_io(bo);
253303b705cfSriastradh	if (!bo->reusable) {
253403b705cfSriastradh		DBG(("%s: handle=%d, not reusable\n",
253503b705cfSriastradh		     __FUNCTION__, bo->handle));
253603b705cfSriastradh		goto destroy;
253703b705cfSriastradh	}
253803b705cfSriastradh
253903b705cfSriastradh	assert(list_is_empty(&bo->vma));
254003b705cfSriastradh	assert(list_is_empty(&bo->list));
254103b705cfSriastradh	assert(bo->flush == false);
254203b705cfSriastradh	assert(bo->snoop == false);
254303b705cfSriastradh	assert(bo->io == false);
254403b705cfSriastradh	assert(bo->scanout == false);
25459a906b70Schristos	assert_cacheing(kgem, bo);
254603b705cfSriastradh
254703b705cfSriastradh	kgem_bo_undo(kgem, bo);
254803b705cfSriastradh	assert(bo->refcnt == 0);
254903b705cfSriastradh
255003b705cfSriastradh	if (bo->rq && bo->exec == NULL && !__kgem_busy(kgem, bo->handle))
255103b705cfSriastradh		__kgem_bo_clear_busy(bo);
255203b705cfSriastradh
255303b705cfSriastradh	if (bo->rq) {
255403b705cfSriastradh		struct list *cache;
255503b705cfSriastradh
255603b705cfSriastradh		DBG(("%s: handle=%d -> active\n", __FUNCTION__, bo->handle));
255703b705cfSriastradh		if (bucket(bo) < NUM_CACHE_BUCKETS)
255803b705cfSriastradh			cache = &kgem->active[bucket(bo)][bo->tiling];
255903b705cfSriastradh		else
256003b705cfSriastradh			cache = &kgem->large;
256103b705cfSriastradh		list_add(&bo->list, cache);
256203b705cfSriastradh		return;
256303b705cfSriastradh	}
256403b705cfSriastradh
256503b705cfSriastradh	assert(bo->exec == NULL);
256603b705cfSriastradh	assert(list_is_empty(&bo->request));
256703b705cfSriastradh
25689a906b70Schristos	if (bo->map__cpu == NULL || bucket(bo) >= NUM_CACHE_BUCKETS) {
256903b705cfSriastradh		if (!kgem_bo_set_purgeable(kgem, bo))
257003b705cfSriastradh			goto destroy;
257103b705cfSriastradh
257203b705cfSriastradh		if (!kgem->has_llc && bo->domain == DOMAIN_CPU)
257303b705cfSriastradh			goto destroy;
257403b705cfSriastradh
257503b705cfSriastradh		DBG(("%s: handle=%d, purged\n",
257603b705cfSriastradh		     __FUNCTION__, bo->handle));
257703b705cfSriastradh	}
257803b705cfSriastradh
257903b705cfSriastradh	kgem_bo_move_to_inactive(kgem, bo);
258003b705cfSriastradh	return;
258103b705cfSriastradh
258203b705cfSriastradhdestroy:
258303b705cfSriastradh	if (!bo->exec)
258403b705cfSriastradh		kgem_bo_free(kgem, bo);
258503b705cfSriastradh}
258603b705cfSriastradh
258703b705cfSriastradhstatic void kgem_bo_unref(struct kgem *kgem, struct kgem_bo *bo)
258803b705cfSriastradh{
258903b705cfSriastradh	assert(bo->refcnt);
259003b705cfSriastradh	if (--bo->refcnt == 0)
259103b705cfSriastradh		__kgem_bo_destroy(kgem, bo);
259203b705cfSriastradh}
259303b705cfSriastradh
259403b705cfSriastradhstatic void kgem_buffer_release(struct kgem *kgem, struct kgem_buffer *bo)
259503b705cfSriastradh{
25969a906b70Schristos	assert(bo->base.io);
259703b705cfSriastradh	while (!list_is_empty(&bo->base.vma)) {
259803b705cfSriastradh		struct kgem_bo *cached;
259903b705cfSriastradh
260003b705cfSriastradh		cached = list_first_entry(&bo->base.vma, struct kgem_bo, vma);
260103b705cfSriastradh		assert(cached->proxy == &bo->base);
26029a906b70Schristos		assert(cached != &bo->base);
260303b705cfSriastradh		list_del(&cached->vma);
260403b705cfSriastradh
26059a906b70Schristos		assert(*(struct kgem_bo **)cached->map__gtt == cached);
26069a906b70Schristos		*(struct kgem_bo **)cached->map__gtt = NULL;
26079a906b70Schristos		cached->map__gtt = NULL;
260803b705cfSriastradh
260903b705cfSriastradh		kgem_bo_destroy(kgem, cached);
261003b705cfSriastradh	}
261103b705cfSriastradh}
261203b705cfSriastradh
26139a906b70Schristosvoid kgem_retire__buffers(struct kgem *kgem)
261403b705cfSriastradh{
261503b705cfSriastradh	while (!list_is_empty(&kgem->active_buffers)) {
261603b705cfSriastradh		struct kgem_buffer *bo =
261703b705cfSriastradh			list_last_entry(&kgem->active_buffers,
261803b705cfSriastradh					struct kgem_buffer,
261903b705cfSriastradh					base.list);
262003b705cfSriastradh
26219a906b70Schristos		DBG(("%s: handle=%d, busy? %d [%d]\n",
26229a906b70Schristos		     __FUNCTION__, bo->base.handle, bo->base.rq != NULL, bo->base.exec != NULL));
26239a906b70Schristos
26249a906b70Schristos		assert(bo->base.exec == NULL || RQ(bo->base.rq) == kgem->next_request);
262503b705cfSriastradh		if (bo->base.rq)
262603b705cfSriastradh			break;
262703b705cfSriastradh
262803b705cfSriastradh		DBG(("%s: releasing upload cache for handle=%d? %d\n",
262903b705cfSriastradh		     __FUNCTION__, bo->base.handle, !list_is_empty(&bo->base.vma)));
263003b705cfSriastradh		list_del(&bo->base.list);
263103b705cfSriastradh		kgem_buffer_release(kgem, bo);
263203b705cfSriastradh		kgem_bo_unref(kgem, &bo->base);
263303b705cfSriastradh	}
263403b705cfSriastradh}
263503b705cfSriastradh
263603b705cfSriastradhstatic bool kgem_retire__flushing(struct kgem *kgem)
263703b705cfSriastradh{
263803b705cfSriastradh	struct kgem_bo *bo, *next;
263903b705cfSriastradh	bool retired = false;
264003b705cfSriastradh
264103b705cfSriastradh	list_for_each_entry_safe(bo, next, &kgem->flushing, request) {
26429a906b70Schristos		assert(RQ(bo->rq) == (void *)kgem);
264303b705cfSriastradh		assert(bo->exec == NULL);
264403b705cfSriastradh
264503b705cfSriastradh		if (__kgem_busy(kgem, bo->handle))
264603b705cfSriastradh			break;
264703b705cfSriastradh
264803b705cfSriastradh		__kgem_bo_clear_busy(bo);
264903b705cfSriastradh
265003b705cfSriastradh		if (bo->refcnt)
265103b705cfSriastradh			continue;
265203b705cfSriastradh
26539a906b70Schristos		retired |= kgem_bo_move_to_cache(kgem, bo);
265403b705cfSriastradh	}
265503b705cfSriastradh#if HAS_DEBUG_FULL
265603b705cfSriastradh	{
265703b705cfSriastradh		int count = 0;
265803b705cfSriastradh		list_for_each_entry(bo, &kgem->flushing, request)
265903b705cfSriastradh			count++;
26609a906b70Schristos		DBG(("%s: %d bo on flushing list\n", __FUNCTION__, count));
266103b705cfSriastradh	}
266203b705cfSriastradh#endif
266303b705cfSriastradh
266403b705cfSriastradh	kgem->need_retire |= !list_is_empty(&kgem->flushing);
266503b705cfSriastradh
266603b705cfSriastradh	return retired;
266703b705cfSriastradh}
266803b705cfSriastradh
266903b705cfSriastradhstatic bool __kgem_retire_rq(struct kgem *kgem, struct kgem_request *rq)
267003b705cfSriastradh{
267103b705cfSriastradh	bool retired = false;
267203b705cfSriastradh
267303b705cfSriastradh	DBG(("%s: request %d complete\n",
267403b705cfSriastradh	     __FUNCTION__, rq->bo->handle));
26759a906b70Schristos	assert(RQ(rq->bo->rq) == rq);
267603b705cfSriastradh
2677813957e3Ssnj	if (rq == kgem->fence[rq->ring])
2678813957e3Ssnj		kgem->fence[rq->ring] = NULL;
2679813957e3Ssnj
268003b705cfSriastradh	while (!list_is_empty(&rq->buffers)) {
268103b705cfSriastradh		struct kgem_bo *bo;
268203b705cfSriastradh
268303b705cfSriastradh		bo = list_first_entry(&rq->buffers,
268403b705cfSriastradh				      struct kgem_bo,
268503b705cfSriastradh				      request);
268603b705cfSriastradh
268703b705cfSriastradh		assert(RQ(bo->rq) == rq);
268803b705cfSriastradh		assert(bo->exec == NULL);
268903b705cfSriastradh		assert(bo->domain == DOMAIN_GPU || bo->domain == DOMAIN_NONE);
269003b705cfSriastradh
269103b705cfSriastradh		list_del(&bo->request);
269203b705cfSriastradh
269303b705cfSriastradh		if (bo->needs_flush)
269403b705cfSriastradh			bo->needs_flush = __kgem_busy(kgem, bo->handle);
269503b705cfSriastradh		if (bo->needs_flush) {
269603b705cfSriastradh			DBG(("%s: moving %d to flushing\n",
269703b705cfSriastradh			     __FUNCTION__, bo->handle));
269803b705cfSriastradh			list_add(&bo->request, &kgem->flushing);
26999a906b70Schristos			bo->rq = MAKE_REQUEST(kgem, RQ_RING(bo->rq));
27009a906b70Schristos			kgem->need_retire = true;
270103b705cfSriastradh			continue;
270203b705cfSriastradh		}
270303b705cfSriastradh
270403b705cfSriastradh		bo->domain = DOMAIN_NONE;
2705813957e3Ssnj		bo->gtt_dirty = false;
270603b705cfSriastradh		bo->rq = NULL;
270703b705cfSriastradh		if (bo->refcnt)
270803b705cfSriastradh			continue;
270903b705cfSriastradh
27109a906b70Schristos		retired |= kgem_bo_move_to_cache(kgem, bo);
271103b705cfSriastradh	}
271203b705cfSriastradh
271303b705cfSriastradh	assert(rq->bo->rq == NULL);
27149a906b70Schristos	assert(rq->bo->exec == NULL);
271503b705cfSriastradh	assert(list_is_empty(&rq->bo->request));
27169a906b70Schristos	assert(rq->bo->refcnt > 0);
271703b705cfSriastradh
271803b705cfSriastradh	if (--rq->bo->refcnt == 0) {
271903b705cfSriastradh		if (kgem_bo_set_purgeable(kgem, rq->bo)) {
272003b705cfSriastradh			kgem_bo_move_to_inactive(kgem, rq->bo);
272103b705cfSriastradh			retired = true;
272203b705cfSriastradh		} else {
272303b705cfSriastradh			DBG(("%s: closing %d\n",
272403b705cfSriastradh			     __FUNCTION__, rq->bo->handle));
272503b705cfSriastradh			kgem_bo_free(kgem, rq->bo);
272603b705cfSriastradh		}
272703b705cfSriastradh	}
272803b705cfSriastradh
272903b705cfSriastradh	__kgem_request_free(rq);
273003b705cfSriastradh	return retired;
273103b705cfSriastradh}
273203b705cfSriastradh
273303b705cfSriastradhstatic bool kgem_retire__requests_ring(struct kgem *kgem, int ring)
273403b705cfSriastradh{
273503b705cfSriastradh	bool retired = false;
273603b705cfSriastradh
273703b705cfSriastradh	while (!list_is_empty(&kgem->requests[ring])) {
273803b705cfSriastradh		struct kgem_request *rq;
273903b705cfSriastradh
274003b705cfSriastradh		rq = list_first_entry(&kgem->requests[ring],
274103b705cfSriastradh				      struct kgem_request,
274203b705cfSriastradh				      list);
27439a906b70Schristos		assert(rq->ring == ring);
274403b705cfSriastradh		if (__kgem_busy(kgem, rq->bo->handle))
274503b705cfSriastradh			break;
274603b705cfSriastradh
274703b705cfSriastradh		retired |= __kgem_retire_rq(kgem, rq);
274803b705cfSriastradh	}
274903b705cfSriastradh
275003b705cfSriastradh#if HAS_DEBUG_FULL
275103b705cfSriastradh	{
275203b705cfSriastradh		struct kgem_bo *bo;
275303b705cfSriastradh		int count = 0;
275403b705cfSriastradh
275503b705cfSriastradh		list_for_each_entry(bo, &kgem->requests[ring], request)
275603b705cfSriastradh			count++;
275703b705cfSriastradh
275803b705cfSriastradh		bo = NULL;
275903b705cfSriastradh		if (!list_is_empty(&kgem->requests[ring]))
276003b705cfSriastradh			bo = list_first_entry(&kgem->requests[ring],
276103b705cfSriastradh					      struct kgem_request,
276203b705cfSriastradh					      list)->bo;
276303b705cfSriastradh
27649a906b70Schristos		DBG(("%s: ring=%d, %d outstanding requests, oldest=%d\n",
27659a906b70Schristos		     __FUNCTION__, ring, count, bo ? bo->handle : 0));
276603b705cfSriastradh	}
276703b705cfSriastradh#endif
276803b705cfSriastradh
276903b705cfSriastradh	return retired;
277003b705cfSriastradh}
277103b705cfSriastradh
277203b705cfSriastradhstatic bool kgem_retire__requests(struct kgem *kgem)
277303b705cfSriastradh{
277403b705cfSriastradh	bool retired = false;
277503b705cfSriastradh	int n;
277603b705cfSriastradh
277703b705cfSriastradh	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
277803b705cfSriastradh		retired |= kgem_retire__requests_ring(kgem, n);
277903b705cfSriastradh		kgem->need_retire |= !list_is_empty(&kgem->requests[n]);
278003b705cfSriastradh	}
278103b705cfSriastradh
278203b705cfSriastradh	return retired;
278303b705cfSriastradh}
278403b705cfSriastradh
278503b705cfSriastradhbool kgem_retire(struct kgem *kgem)
278603b705cfSriastradh{
278703b705cfSriastradh	bool retired = false;
278803b705cfSriastradh
27899a906b70Schristos	DBG(("%s, need_retire?=%d\n", __FUNCTION__, kgem->need_retire));
279003b705cfSriastradh
279103b705cfSriastradh	kgem->need_retire = false;
279203b705cfSriastradh
279303b705cfSriastradh	retired |= kgem_retire__flushing(kgem);
279403b705cfSriastradh	retired |= kgem_retire__requests(kgem);
279503b705cfSriastradh
279603b705cfSriastradh	DBG(("%s -- retired=%d, need_retire=%d\n",
279703b705cfSriastradh	     __FUNCTION__, retired, kgem->need_retire));
279803b705cfSriastradh
279903b705cfSriastradh	kgem->retire(kgem);
280003b705cfSriastradh
280103b705cfSriastradh	return retired;
280203b705cfSriastradh}
280303b705cfSriastradh
280403b705cfSriastradhbool __kgem_ring_is_idle(struct kgem *kgem, int ring)
280503b705cfSriastradh{
280603b705cfSriastradh	struct kgem_request *rq;
280703b705cfSriastradh
28089a906b70Schristos	assert(ring < ARRAY_SIZE(kgem->requests));
280903b705cfSriastradh	assert(!list_is_empty(&kgem->requests[ring]));
281003b705cfSriastradh
2811813957e3Ssnj	rq = kgem->fence[ring];
2812813957e3Ssnj	if (rq) {
2813813957e3Ssnj		struct kgem_request *tmp;
2814813957e3Ssnj
2815813957e3Ssnj		if (__kgem_busy(kgem, rq->bo->handle)) {
2816813957e3Ssnj			DBG(("%s: last fence handle=%d still busy\n",
2817813957e3Ssnj			     __FUNCTION__, rq->bo->handle));
2818813957e3Ssnj			return false;
2819813957e3Ssnj		}
2820813957e3Ssnj
2821813957e3Ssnj		do {
2822813957e3Ssnj			tmp = list_first_entry(&kgem->requests[ring],
2823813957e3Ssnj					       struct kgem_request,
2824813957e3Ssnj					       list);
2825813957e3Ssnj			assert(tmp->ring == ring);
2826813957e3Ssnj			__kgem_retire_rq(kgem, tmp);
2827813957e3Ssnj		} while (tmp != rq);
2828813957e3Ssnj
2829813957e3Ssnj		assert(kgem->fence[ring] == NULL);
2830813957e3Ssnj		if (list_is_empty(&kgem->requests[ring]))
2831813957e3Ssnj			return true;
2832813957e3Ssnj	}
2833813957e3Ssnj
283403b705cfSriastradh	rq = list_last_entry(&kgem->requests[ring],
283503b705cfSriastradh			     struct kgem_request, list);
28369a906b70Schristos	assert(rq->ring == ring);
283703b705cfSriastradh	if (__kgem_busy(kgem, rq->bo->handle)) {
283803b705cfSriastradh		DBG(("%s: last requests handle=%d still busy\n",
283903b705cfSriastradh		     __FUNCTION__, rq->bo->handle));
2840813957e3Ssnj		kgem->fence[ring] = rq;
284103b705cfSriastradh		return false;
284203b705cfSriastradh	}
284303b705cfSriastradh
284403b705cfSriastradh	DBG(("%s: ring=%d idle (handle=%d)\n",
284503b705cfSriastradh	     __FUNCTION__, ring, rq->bo->handle));
284603b705cfSriastradh
2847813957e3Ssnj	while (!list_is_empty(&kgem->requests[ring])) {
2848813957e3Ssnj		rq = list_first_entry(&kgem->requests[ring],
2849813957e3Ssnj				      struct kgem_request,
2850813957e3Ssnj				      list);
2851813957e3Ssnj		assert(rq->ring == ring);
2852813957e3Ssnj		__kgem_retire_rq(kgem, rq);
2853813957e3Ssnj	}
28549a906b70Schristos
285503b705cfSriastradh	return true;
285603b705cfSriastradh}
285703b705cfSriastradh
2858813957e3Ssnjvoid __kgem_retire_requests_upto(struct kgem *kgem, struct kgem_bo *bo)
2859813957e3Ssnj{
2860813957e3Ssnj	struct kgem_request *rq = bo->rq, *tmp;
2861813957e3Ssnj	struct list *requests = &kgem->requests[RQ_RING(rq) == I915_EXEC_BLT];
2862813957e3Ssnj
2863813957e3Ssnj	rq = RQ(rq);
2864813957e3Ssnj	assert(rq != &kgem->static_request);
2865813957e3Ssnj	if (rq == (struct kgem_request *)kgem) {
2866813957e3Ssnj		__kgem_bo_clear_busy(bo);
2867813957e3Ssnj		return;
2868813957e3Ssnj	}
2869813957e3Ssnj
2870813957e3Ssnj	do {
2871813957e3Ssnj		tmp = list_first_entry(requests, struct kgem_request, list);
2872813957e3Ssnj		assert(tmp->ring == rq->ring);
2873813957e3Ssnj		__kgem_retire_rq(kgem, tmp);
2874813957e3Ssnj	} while (tmp != rq);
2875813957e3Ssnj}
2876813957e3Ssnj
28779a906b70Schristos#if 0
28789a906b70Schristosstatic void kgem_commit__check_reloc(struct kgem *kgem)
28799a906b70Schristos{
28809a906b70Schristos	struct kgem_request *rq = kgem->next_request;
28819a906b70Schristos	struct kgem_bo *bo;
28829a906b70Schristos	bool has_64bit = kgem->gen >= 0100;
28839a906b70Schristos	int i;
28849a906b70Schristos
28859a906b70Schristos	for (i = 0; i < kgem->nreloc; i++) {
28869a906b70Schristos		list_for_each_entry(bo, &rq->buffers, request) {
28879a906b70Schristos			if (bo->target_handle == kgem->reloc[i].target_handle) {
28889a906b70Schristos				uint64_t value = 0;
28899a906b70Schristos				gem_read(kgem->fd, rq->bo->handle, &value, kgem->reloc[i].offset, has_64bit ? 8 : 4);
28909a906b70Schristos				assert(bo->exec->offset == -1 || value == bo->exec->offset + (int)kgem->reloc[i].delta);
28919a906b70Schristos				break;
28929a906b70Schristos			}
28939a906b70Schristos		}
28949a906b70Schristos	}
28959a906b70Schristos}
28969a906b70Schristos#else
28979a906b70Schristos#define kgem_commit__check_reloc(kgem)
28989a906b70Schristos#endif
28999a906b70Schristos
29009a906b70Schristos#ifndef NDEBUG
29019a906b70Schristosstatic void kgem_commit__check_buffers(struct kgem *kgem)
29029a906b70Schristos{
29039a906b70Schristos	struct kgem_buffer *bo;
29049a906b70Schristos
29059a906b70Schristos	list_for_each_entry(bo, &kgem->active_buffers, base.list)
29069a906b70Schristos		assert(bo->base.exec == NULL);
29079a906b70Schristos}
29089a906b70Schristos#else
29099a906b70Schristos#define kgem_commit__check_buffers(kgem)
29109a906b70Schristos#endif
29119a906b70Schristos
291203b705cfSriastradhstatic void kgem_commit(struct kgem *kgem)
291303b705cfSriastradh{
291403b705cfSriastradh	struct kgem_request *rq = kgem->next_request;
291503b705cfSriastradh	struct kgem_bo *bo, *next;
291603b705cfSriastradh
29179a906b70Schristos	kgem_commit__check_reloc(kgem);
29189a906b70Schristos
291903b705cfSriastradh	list_for_each_entry_safe(bo, next, &rq->buffers, request) {
292003b705cfSriastradh		assert(next->request.prev == &bo->request);
292103b705cfSriastradh
292203b705cfSriastradh		DBG(("%s: release handle=%d (proxy? %d), dirty? %d flush? %d, snoop? %d -> offset=%x\n",
292303b705cfSriastradh		     __FUNCTION__, bo->handle, bo->proxy != NULL,
292403b705cfSriastradh		     bo->gpu_dirty, bo->needs_flush, bo->snoop,
292503b705cfSriastradh		     (unsigned)bo->exec->offset));
292603b705cfSriastradh
292703b705cfSriastradh		assert(bo->exec);
292803b705cfSriastradh		assert(bo->proxy == NULL || bo->exec == &_kgem_dummy_exec);
292903b705cfSriastradh		assert(RQ(bo->rq) == rq || (RQ(bo->proxy->rq) == rq));
293003b705cfSriastradh
293103b705cfSriastradh		bo->presumed_offset = bo->exec->offset;
293203b705cfSriastradh		bo->exec = NULL;
293303b705cfSriastradh		bo->target_handle = -1;
293403b705cfSriastradh
293503b705cfSriastradh		if (!bo->refcnt && !bo->reusable) {
293603b705cfSriastradh			assert(!bo->snoop);
29379a906b70Schristos			assert(!bo->proxy);
293803b705cfSriastradh			kgem_bo_free(kgem, bo);
293903b705cfSriastradh			continue;
294003b705cfSriastradh		}
294103b705cfSriastradh
294203b705cfSriastradh		bo->binding.offset = 0;
294303b705cfSriastradh		bo->domain = DOMAIN_GPU;
294403b705cfSriastradh		bo->gpu_dirty = false;
294503b705cfSriastradh
294603b705cfSriastradh		if (bo->proxy) {
294703b705cfSriastradh			/* proxies are not used for domain tracking */
294803b705cfSriastradh			__kgem_bo_clear_busy(bo);
294903b705cfSriastradh		}
295003b705cfSriastradh
29519a906b70Schristos		kgem->scanout_busy |= bo->scanout && bo->needs_flush;
295203b705cfSriastradh	}
295303b705cfSriastradh
295403b705cfSriastradh	if (rq == &kgem->static_request) {
295503b705cfSriastradh		struct drm_i915_gem_set_domain set_domain;
295603b705cfSriastradh
295703b705cfSriastradh		DBG(("%s: syncing due to allocation failure\n", __FUNCTION__));
295803b705cfSriastradh
295903b705cfSriastradh		VG_CLEAR(set_domain);
296003b705cfSriastradh		set_domain.handle = rq->bo->handle;
296103b705cfSriastradh		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
296203b705cfSriastradh		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
29639a906b70Schristos		if (do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
296403b705cfSriastradh			DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
296503b705cfSriastradh			kgem_throttle(kgem);
296603b705cfSriastradh		}
296703b705cfSriastradh
296803b705cfSriastradh		kgem_retire(kgem);
296903b705cfSriastradh		assert(list_is_empty(&rq->buffers));
297003b705cfSriastradh
29719a906b70Schristos		assert(rq->bo->map__gtt == NULL);
2972813957e3Ssnj		assert(rq->bo->map__wc == NULL);
29739a906b70Schristos		assert(rq->bo->map__cpu == NULL);
297403b705cfSriastradh		gem_close(kgem->fd, rq->bo->handle);
297503b705cfSriastradh		kgem_cleanup_cache(kgem);
297603b705cfSriastradh	} else {
29779a906b70Schristos		assert(rq->ring < ARRAY_SIZE(kgem->requests));
297803b705cfSriastradh		list_add_tail(&rq->list, &kgem->requests[rq->ring]);
297903b705cfSriastradh		kgem->need_throttle = kgem->need_retire = 1;
2980813957e3Ssnj
2981813957e3Ssnj		if (kgem->fence[rq->ring] == NULL &&
2982813957e3Ssnj		    __kgem_busy(kgem, rq->bo->handle))
2983813957e3Ssnj			kgem->fence[rq->ring] = rq;
298403b705cfSriastradh	}
298503b705cfSriastradh
298603b705cfSriastradh	kgem->next_request = NULL;
29879a906b70Schristos
29889a906b70Schristos	kgem_commit__check_buffers(kgem);
298903b705cfSriastradh}
299003b705cfSriastradh
299103b705cfSriastradhstatic void kgem_close_list(struct kgem *kgem, struct list *head)
299203b705cfSriastradh{
299303b705cfSriastradh	while (!list_is_empty(head))
299403b705cfSriastradh		kgem_bo_free(kgem, list_first_entry(head, struct kgem_bo, list));
299503b705cfSriastradh}
299603b705cfSriastradh
299703b705cfSriastradhstatic void kgem_close_inactive(struct kgem *kgem)
299803b705cfSriastradh{
299903b705cfSriastradh	unsigned int i;
300003b705cfSriastradh
300103b705cfSriastradh	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
300203b705cfSriastradh		kgem_close_list(kgem, &kgem->inactive[i]);
300303b705cfSriastradh}
300403b705cfSriastradh
300503b705cfSriastradhstatic void kgem_finish_buffers(struct kgem *kgem)
300603b705cfSriastradh{
300703b705cfSriastradh	struct kgem_buffer *bo, *next;
300803b705cfSriastradh
300903b705cfSriastradh	list_for_each_entry_safe(bo, next, &kgem->batch_buffers, base.list) {
30109a906b70Schristos		DBG(("%s: buffer handle=%d, used=%d, exec?=%d, write=%d, mmapped=%s, refcnt=%d\n",
301103b705cfSriastradh		     __FUNCTION__, bo->base.handle, bo->used, bo->base.exec!=NULL,
30129a906b70Schristos		     bo->write, bo->mmapped == MMAPPED_CPU ? "cpu" : bo->mmapped == MMAPPED_GTT ? "gtt" : "no",
30139a906b70Schristos		     bo->base.refcnt));
301403b705cfSriastradh
301503b705cfSriastradh		assert(next->base.list.prev == &bo->base.list);
301603b705cfSriastradh		assert(bo->base.io);
301703b705cfSriastradh		assert(bo->base.refcnt >= 1);
301803b705cfSriastradh
30199a906b70Schristos		if (bo->base.refcnt > 1 && !bo->base.exec) {
30209a906b70Schristos			DBG(("%s: skipping unattached handle=%d, used=%d, refcnt=%d\n",
30219a906b70Schristos			     __FUNCTION__, bo->base.handle, bo->used, bo->base.refcnt));
302203b705cfSriastradh			continue;
302303b705cfSriastradh		}
302403b705cfSriastradh
302503b705cfSriastradh		if (!bo->write) {
302603b705cfSriastradh			assert(bo->base.exec || bo->base.refcnt > 1);
302703b705cfSriastradh			goto decouple;
302803b705cfSriastradh		}
302903b705cfSriastradh
303003b705cfSriastradh		if (bo->mmapped) {
30319a906b70Schristos			uint32_t used;
303203b705cfSriastradh
303303b705cfSriastradh			assert(!bo->need_io);
303403b705cfSriastradh
303503b705cfSriastradh			used = ALIGN(bo->used, PAGE_SIZE);
303603b705cfSriastradh			if (!DBG_NO_UPLOAD_ACTIVE &&
303703b705cfSriastradh			    used + PAGE_SIZE <= bytes(&bo->base) &&
30389a906b70Schristos			    (kgem->has_llc || bo->mmapped == MMAPPED_GTT || bo->base.snoop)) {
30399a906b70Schristos				DBG(("%s: retaining upload buffer (%d/%d): used=%d, refcnt=%d\n",
30409a906b70Schristos				     __FUNCTION__, bo->used, bytes(&bo->base), used, bo->base.refcnt));
304103b705cfSriastradh				bo->used = used;
304203b705cfSriastradh				list_move(&bo->base.list,
304303b705cfSriastradh					  &kgem->active_buffers);
30449a906b70Schristos				kgem->need_retire = true;
304503b705cfSriastradh				continue;
304603b705cfSriastradh			}
304703b705cfSriastradh			DBG(("%s: discarding mmapped buffer, used=%d, map type=%d\n",
30489a906b70Schristos			     __FUNCTION__, bo->used, bo->mmapped));
304903b705cfSriastradh			goto decouple;
305003b705cfSriastradh		}
305103b705cfSriastradh
30529a906b70Schristos		if (!bo->used || !bo->base.exec) {
305303b705cfSriastradh			/* Unless we replace the handle in the execbuffer,
305403b705cfSriastradh			 * then this bo will become active. So decouple it
305503b705cfSriastradh			 * from the buffer list and track it in the normal
305603b705cfSriastradh			 * manner.
305703b705cfSriastradh			 */
305803b705cfSriastradh			goto decouple;
305903b705cfSriastradh		}
306003b705cfSriastradh
306103b705cfSriastradh		assert(bo->need_io);
306203b705cfSriastradh		assert(bo->base.rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
306303b705cfSriastradh		assert(bo->base.domain != DOMAIN_GPU);
306403b705cfSriastradh
306503b705cfSriastradh		if (bo->base.refcnt == 1 &&
306603b705cfSriastradh		    bo->base.size.pages.count > 1 &&
306703b705cfSriastradh		    bo->used < bytes(&bo->base) / 2) {
306803b705cfSriastradh			struct kgem_bo *shrink;
306903b705cfSriastradh			unsigned alloc = NUM_PAGES(bo->used);
307003b705cfSriastradh
307103b705cfSriastradh			shrink = search_snoop_cache(kgem, alloc,
307203b705cfSriastradh						    CREATE_INACTIVE | CREATE_NO_RETIRE);
307303b705cfSriastradh			if (shrink) {
307403b705cfSriastradh				void *map;
307503b705cfSriastradh				int n;
307603b705cfSriastradh
307703b705cfSriastradh				DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n",
307803b705cfSriastradh				     __FUNCTION__,
307903b705cfSriastradh				     bo->used, bytes(&bo->base), bytes(shrink),
308003b705cfSriastradh				     bo->base.handle, shrink->handle));
308103b705cfSriastradh
308203b705cfSriastradh				assert(bo->used <= bytes(shrink));
308303b705cfSriastradh				map = kgem_bo_map__cpu(kgem, shrink);
308403b705cfSriastradh				if (map) {
308503b705cfSriastradh					kgem_bo_sync__cpu(kgem, shrink);
308603b705cfSriastradh					memcpy(map, bo->mem, bo->used);
308703b705cfSriastradh
308803b705cfSriastradh					shrink->target_handle =
308903b705cfSriastradh						kgem->has_handle_lut ? bo->base.target_handle : shrink->handle;
309003b705cfSriastradh					for (n = 0; n < kgem->nreloc; n++) {
309103b705cfSriastradh						if (kgem->reloc[n].target_handle == bo->base.target_handle) {
309203b705cfSriastradh							kgem->reloc[n].target_handle = shrink->target_handle;
309303b705cfSriastradh							kgem->reloc[n].presumed_offset = shrink->presumed_offset;
309403b705cfSriastradh							kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
309503b705cfSriastradh								kgem->reloc[n].delta + shrink->presumed_offset;
309603b705cfSriastradh						}
309703b705cfSriastradh					}
309803b705cfSriastradh
309903b705cfSriastradh					bo->base.exec->handle = shrink->handle;
310003b705cfSriastradh					bo->base.exec->offset = shrink->presumed_offset;
310103b705cfSriastradh					shrink->exec = bo->base.exec;
310203b705cfSriastradh					shrink->rq = bo->base.rq;
310303b705cfSriastradh					list_replace(&bo->base.request,
310403b705cfSriastradh						     &shrink->request);
310503b705cfSriastradh					list_init(&bo->base.request);
310603b705cfSriastradh					shrink->needs_flush = bo->base.gpu_dirty;
310703b705cfSriastradh
310803b705cfSriastradh					bo->base.exec = NULL;
310903b705cfSriastradh					bo->base.rq = NULL;
311003b705cfSriastradh					bo->base.gpu_dirty = false;
311103b705cfSriastradh					bo->base.needs_flush = false;
311203b705cfSriastradh					bo->used = 0;
311303b705cfSriastradh
311403b705cfSriastradh					goto decouple;
311503b705cfSriastradh				}
311603b705cfSriastradh
311703b705cfSriastradh				__kgem_bo_destroy(kgem, shrink);
311803b705cfSriastradh			}
311903b705cfSriastradh
312003b705cfSriastradh			shrink = search_linear_cache(kgem, alloc,
312103b705cfSriastradh						     CREATE_INACTIVE | CREATE_NO_RETIRE);
312203b705cfSriastradh			if (shrink) {
312303b705cfSriastradh				int n;
312403b705cfSriastradh
312503b705cfSriastradh				DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n",
312603b705cfSriastradh				     __FUNCTION__,
312703b705cfSriastradh				     bo->used, bytes(&bo->base), bytes(shrink),
312803b705cfSriastradh				     bo->base.handle, shrink->handle));
312903b705cfSriastradh
313003b705cfSriastradh				assert(bo->used <= bytes(shrink));
31319a906b70Schristos				if (gem_write__cachealigned(kgem->fd, shrink->handle,
31329a906b70Schristos							    0, bo->used, bo->mem) == 0) {
313303b705cfSriastradh					shrink->target_handle =
313403b705cfSriastradh						kgem->has_handle_lut ? bo->base.target_handle : shrink->handle;
313503b705cfSriastradh					for (n = 0; n < kgem->nreloc; n++) {
313603b705cfSriastradh						if (kgem->reloc[n].target_handle == bo->base.target_handle) {
313703b705cfSriastradh							kgem->reloc[n].target_handle = shrink->target_handle;
313803b705cfSriastradh							kgem->reloc[n].presumed_offset = shrink->presumed_offset;
313903b705cfSriastradh							kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
314003b705cfSriastradh								kgem->reloc[n].delta + shrink->presumed_offset;
314103b705cfSriastradh						}
314203b705cfSriastradh					}
314303b705cfSriastradh
314403b705cfSriastradh					bo->base.exec->handle = shrink->handle;
314503b705cfSriastradh					bo->base.exec->offset = shrink->presumed_offset;
314603b705cfSriastradh					shrink->exec = bo->base.exec;
314703b705cfSriastradh					shrink->rq = bo->base.rq;
314803b705cfSriastradh					list_replace(&bo->base.request,
314903b705cfSriastradh						     &shrink->request);
315003b705cfSriastradh					list_init(&bo->base.request);
315103b705cfSriastradh					shrink->needs_flush = bo->base.gpu_dirty;
315203b705cfSriastradh
315303b705cfSriastradh					bo->base.exec = NULL;
315403b705cfSriastradh					bo->base.rq = NULL;
315503b705cfSriastradh					bo->base.gpu_dirty = false;
315603b705cfSriastradh					bo->base.needs_flush = false;
315703b705cfSriastradh					bo->used = 0;
315803b705cfSriastradh
315903b705cfSriastradh					goto decouple;
316003b705cfSriastradh				}
316103b705cfSriastradh
316203b705cfSriastradh				__kgem_bo_destroy(kgem, shrink);
316303b705cfSriastradh			}
316403b705cfSriastradh		}
316503b705cfSriastradh
316603b705cfSriastradh		DBG(("%s: handle=%d, uploading %d/%d\n",
316703b705cfSriastradh		     __FUNCTION__, bo->base.handle, bo->used, bytes(&bo->base)));
316803b705cfSriastradh		ASSERT_IDLE(kgem, bo->base.handle);
316903b705cfSriastradh		assert(bo->used <= bytes(&bo->base));
31709a906b70Schristos		gem_write__cachealigned(kgem->fd, bo->base.handle,
31719a906b70Schristos					0, bo->used, bo->mem);
317203b705cfSriastradh		bo->need_io = 0;
317303b705cfSriastradh
317403b705cfSriastradhdecouple:
317503b705cfSriastradh		DBG(("%s: releasing handle=%d\n",
317603b705cfSriastradh		     __FUNCTION__, bo->base.handle));
317703b705cfSriastradh		list_del(&bo->base.list);
317803b705cfSriastradh		kgem_bo_unref(kgem, &bo->base);
317903b705cfSriastradh	}
318003b705cfSriastradh}
318103b705cfSriastradh
318203b705cfSriastradhstatic void kgem_cleanup(struct kgem *kgem)
318303b705cfSriastradh{
318403b705cfSriastradh	int n;
318503b705cfSriastradh
318603b705cfSriastradh	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
318703b705cfSriastradh		while (!list_is_empty(&kgem->requests[n])) {
318803b705cfSriastradh			struct kgem_request *rq;
318903b705cfSriastradh
319003b705cfSriastradh			rq = list_first_entry(&kgem->requests[n],
319103b705cfSriastradh					      struct kgem_request,
319203b705cfSriastradh					      list);
31939a906b70Schristos			assert(rq->ring == n);
319403b705cfSriastradh			while (!list_is_empty(&rq->buffers)) {
319503b705cfSriastradh				struct kgem_bo *bo;
319603b705cfSriastradh
319703b705cfSriastradh				bo = list_first_entry(&rq->buffers,
319803b705cfSriastradh						      struct kgem_bo,
319903b705cfSriastradh						      request);
320003b705cfSriastradh
320103b705cfSriastradh				bo->exec = NULL;
320203b705cfSriastradh				bo->gpu_dirty = false;
320303b705cfSriastradh				__kgem_bo_clear_busy(bo);
320403b705cfSriastradh				if (bo->refcnt == 0)
320503b705cfSriastradh					kgem_bo_free(kgem, bo);
320603b705cfSriastradh			}
320703b705cfSriastradh
320803b705cfSriastradh			__kgem_request_free(rq);
320903b705cfSriastradh		}
321003b705cfSriastradh	}
321103b705cfSriastradh
321203b705cfSriastradh	kgem_close_inactive(kgem);
321303b705cfSriastradh}
321403b705cfSriastradh
3215813957e3Ssnjstatic int
3216813957e3Ssnjkgem_batch_write(struct kgem *kgem,
3217813957e3Ssnj		 struct kgem_bo *bo,
3218813957e3Ssnj		 uint32_t size)
321903b705cfSriastradh{
3220813957e3Ssnj	char *ptr;
322103b705cfSriastradh	int ret;
322203b705cfSriastradh
3223813957e3Ssnj	ASSERT_IDLE(kgem, bo->handle);
322403b705cfSriastradh
32259a906b70Schristos#if DBG_NO_EXEC
32269a906b70Schristos	{
32279a906b70Schristos		uint32_t batch[] = { MI_BATCH_BUFFER_END, 0};
3228813957e3Ssnj		return gem_write(kgem->fd, bo->handle, 0, sizeof(batch), batch);
32299a906b70Schristos	}
32309a906b70Schristos#endif
32319a906b70Schristos
3232813957e3Ssnj	assert(!bo->scanout);
32339a906b70Schristosretry:
3234813957e3Ssnj	ptr = NULL;
3235813957e3Ssnj	if (bo->domain == DOMAIN_CPU || kgem->has_llc) {
3236813957e3Ssnj		ptr = bo->map__cpu;
3237813957e3Ssnj		if (ptr == NULL)
3238813957e3Ssnj			ptr = __kgem_bo_map__cpu(kgem, bo);
3239813957e3Ssnj	} else if (kgem->has_wc_mmap) {
3240813957e3Ssnj		ptr = bo->map__wc;
3241813957e3Ssnj		if (ptr == NULL)
3242813957e3Ssnj			ptr = __kgem_bo_map__wc(kgem, bo);
3243813957e3Ssnj	}
3244813957e3Ssnj	if (ptr) {
3245813957e3Ssnj		memcpy(ptr, kgem->batch, sizeof(uint32_t)*kgem->nbatch);
3246813957e3Ssnj		if (kgem->surface != kgem->batch_size) {
3247813957e3Ssnj			ret = PAGE_ALIGN(sizeof(uint32_t) * kgem->batch_size);
3248813957e3Ssnj			ret -= sizeof(uint32_t) * kgem->surface;
3249813957e3Ssnj			ptr += size - ret;
3250813957e3Ssnj			memcpy(ptr, kgem->batch + kgem->surface,
3251813957e3Ssnj			       (kgem->batch_size - kgem->surface)*sizeof(uint32_t));
3252813957e3Ssnj		}
3253813957e3Ssnj		return 0;
3254813957e3Ssnj	}
3255813957e3Ssnj
325603b705cfSriastradh	/* If there is no surface data, just upload the batch */
32579a906b70Schristos	if (kgem->surface == kgem->batch_size) {
3258813957e3Ssnj		if ((ret = gem_write__cachealigned(kgem->fd, bo->handle,
32599a906b70Schristos						   0, sizeof(uint32_t)*kgem->nbatch,
32609a906b70Schristos						   kgem->batch)) == 0)
32619a906b70Schristos			return 0;
32629a906b70Schristos
32639a906b70Schristos		goto expire;
32649a906b70Schristos	}
326503b705cfSriastradh
326603b705cfSriastradh	/* Are the batch pages conjoint with the surface pages? */
326703b705cfSriastradh	if (kgem->surface < kgem->nbatch + PAGE_SIZE/sizeof(uint32_t)) {
326803b705cfSriastradh		assert(size == PAGE_ALIGN(kgem->batch_size*sizeof(uint32_t)));
3269813957e3Ssnj		if ((ret = gem_write__cachealigned(kgem->fd, bo->handle,
32709a906b70Schristos						   0, kgem->batch_size*sizeof(uint32_t),
32719a906b70Schristos						   kgem->batch)) == 0)
32729a906b70Schristos			return 0;
32739a906b70Schristos
32749a906b70Schristos		goto expire;
327503b705cfSriastradh	}
327603b705cfSriastradh
327703b705cfSriastradh	/* Disjoint surface/batch, upload separately */
3278813957e3Ssnj	if ((ret = gem_write__cachealigned(kgem->fd, bo->handle,
32799a906b70Schristos					   0, sizeof(uint32_t)*kgem->nbatch,
32809a906b70Schristos					   kgem->batch)))
32819a906b70Schristos		goto expire;
328203b705cfSriastradh
328303b705cfSriastradh	ret = PAGE_ALIGN(sizeof(uint32_t) * kgem->batch_size);
328403b705cfSriastradh	ret -= sizeof(uint32_t) * kgem->surface;
328503b705cfSriastradh	assert(size-ret >= kgem->nbatch*sizeof(uint32_t));
3286813957e3Ssnj	if (gem_write(kgem->fd, bo->handle,
32879a906b70Schristos		      size - ret, (kgem->batch_size - kgem->surface)*sizeof(uint32_t),
32889a906b70Schristos		      kgem->batch + kgem->surface))
32899a906b70Schristos		goto expire;
32909a906b70Schristos
32919a906b70Schristos	return 0;
32929a906b70Schristos
32939a906b70Schristosexpire:
32949a906b70Schristos	assert(ret != EINVAL);
32959a906b70Schristos
32969a906b70Schristos	(void)__kgem_throttle_retire(kgem, 0);
32979a906b70Schristos	if (kgem_expire_cache(kgem))
32989a906b70Schristos		goto retry;
32999a906b70Schristos
33009a906b70Schristos	if (kgem_cleanup_cache(kgem))
33019a906b70Schristos		goto retry;
33029a906b70Schristos
33039a906b70Schristos	ERR(("%s: failed to write batch (handle=%d): %d\n",
3304813957e3Ssnj	     __FUNCTION__, bo->handle, -ret));
33059a906b70Schristos	return ret;
330603b705cfSriastradh}
330703b705cfSriastradh
330803b705cfSriastradhvoid kgem_reset(struct kgem *kgem)
330903b705cfSriastradh{
331003b705cfSriastradh	if (kgem->next_request) {
331103b705cfSriastradh		struct kgem_request *rq = kgem->next_request;
331203b705cfSriastradh
331303b705cfSriastradh		while (!list_is_empty(&rq->buffers)) {
331403b705cfSriastradh			struct kgem_bo *bo =
331503b705cfSriastradh				list_first_entry(&rq->buffers,
331603b705cfSriastradh						 struct kgem_bo,
331703b705cfSriastradh						 request);
331803b705cfSriastradh			list_del(&bo->request);
331903b705cfSriastradh
332003b705cfSriastradh			assert(RQ(bo->rq) == rq);
332103b705cfSriastradh
332203b705cfSriastradh			bo->binding.offset = 0;
332303b705cfSriastradh			bo->exec = NULL;
332403b705cfSriastradh			bo->target_handle = -1;
332503b705cfSriastradh			bo->gpu_dirty = false;
332603b705cfSriastradh
332703b705cfSriastradh			if (bo->needs_flush && __kgem_busy(kgem, bo->handle)) {
332803b705cfSriastradh				assert(bo->domain == DOMAIN_GPU || bo->domain == DOMAIN_NONE);
332903b705cfSriastradh				list_add(&bo->request, &kgem->flushing);
333003b705cfSriastradh				bo->rq = (void *)kgem;
33319a906b70Schristos				kgem->need_retire = true;
333203b705cfSriastradh			} else
333303b705cfSriastradh				__kgem_bo_clear_busy(bo);
333403b705cfSriastradh
333503b705cfSriastradh			if (bo->refcnt || bo->rq)
333603b705cfSriastradh				continue;
333703b705cfSriastradh
33389a906b70Schristos			kgem_bo_move_to_cache(kgem, bo);
333903b705cfSriastradh		}
334003b705cfSriastradh
334103b705cfSriastradh		if (rq != &kgem->static_request) {
334203b705cfSriastradh			list_init(&rq->list);
334303b705cfSriastradh			__kgem_request_free(rq);
334403b705cfSriastradh		}
334503b705cfSriastradh	}
334603b705cfSriastradh
334703b705cfSriastradh	kgem->nfence = 0;
334803b705cfSriastradh	kgem->nexec = 0;
334903b705cfSriastradh	kgem->nreloc = 0;
335003b705cfSriastradh	kgem->nreloc__self = 0;
335103b705cfSriastradh	kgem->aperture = 0;
335203b705cfSriastradh	kgem->aperture_fenced = 0;
33539a906b70Schristos	kgem->aperture_max_fence = 0;
335403b705cfSriastradh	kgem->nbatch = 0;
335503b705cfSriastradh	kgem->surface = kgem->batch_size;
335603b705cfSriastradh	kgem->mode = KGEM_NONE;
33579a906b70Schristos	kgem->needs_semaphore = false;
33589a906b70Schristos	kgem->needs_reservation = false;
335903b705cfSriastradh	kgem->flush = 0;
336003b705cfSriastradh	kgem->batch_flags = kgem->batch_flags_base;
3361813957e3Ssnj	assert(kgem->batch);
336203b705cfSriastradh
336303b705cfSriastradh	kgem->next_request = __kgem_request_alloc(kgem);
336403b705cfSriastradh
336503b705cfSriastradh	kgem_sna_reset(kgem);
336603b705cfSriastradh}
336703b705cfSriastradh
3368813957e3Ssnjstatic int compact_batch_surface(struct kgem *kgem, int *shrink)
336903b705cfSriastradh{
3370813957e3Ssnj	int size, n;
337103b705cfSriastradh
337203b705cfSriastradh	if (!kgem->has_relaxed_delta)
33739a906b70Schristos		return kgem->batch_size * sizeof(uint32_t);
337403b705cfSriastradh
337503b705cfSriastradh	/* See if we can pack the contents into one or two pages */
337603b705cfSriastradh	n = ALIGN(kgem->batch_size, 1024);
337703b705cfSriastradh	size = n - kgem->surface + kgem->nbatch;
337803b705cfSriastradh	size = ALIGN(size, 1024);
337903b705cfSriastradh
3380813957e3Ssnj	*shrink = (n - size) * sizeof(uint32_t);
338103b705cfSriastradh	return size * sizeof(uint32_t);
338203b705cfSriastradh}
338303b705cfSriastradh
338403b705cfSriastradhstatic struct kgem_bo *
3385813957e3Ssnjkgem_create_batch(struct kgem *kgem)
338603b705cfSriastradh{
3387813957e3Ssnj#if !DBG_NO_SHRINK_BATCHES
338803b705cfSriastradh	struct drm_i915_gem_set_domain set_domain;
338903b705cfSriastradh	struct kgem_bo *bo;
3390813957e3Ssnj	int shrink = 0;
3391813957e3Ssnj	int size;
3392813957e3Ssnj
3393813957e3Ssnj	if (kgem->surface != kgem->batch_size)
3394813957e3Ssnj		size = compact_batch_surface(kgem, &shrink);
3395813957e3Ssnj	else
3396813957e3Ssnj		size = kgem->nbatch * sizeof(uint32_t);
339703b705cfSriastradh
339803b705cfSriastradh	if (size <= 4096) {
339903b705cfSriastradh		bo = list_first_entry(&kgem->pinned_batches[0],
340003b705cfSriastradh				      struct kgem_bo,
340103b705cfSriastradh				      list);
340203b705cfSriastradh		if (!bo->rq) {
340303b705cfSriastradhout_4096:
34049a906b70Schristos			assert(bo->refcnt > 0);
340503b705cfSriastradh			list_move_tail(&bo->list, &kgem->pinned_batches[0]);
3406813957e3Ssnj			bo = kgem_bo_reference(bo);
3407813957e3Ssnj			goto write;
340803b705cfSriastradh		}
340903b705cfSriastradh
341003b705cfSriastradh		if (!__kgem_busy(kgem, bo->handle)) {
3411813957e3Ssnj			assert(RQ(bo->rq)->bo == bo);
341203b705cfSriastradh			__kgem_retire_rq(kgem, RQ(bo->rq));
341303b705cfSriastradh			goto out_4096;
341403b705cfSriastradh		}
341503b705cfSriastradh	}
341603b705cfSriastradh
341703b705cfSriastradh	if (size <= 16384) {
341803b705cfSriastradh		bo = list_first_entry(&kgem->pinned_batches[1],
341903b705cfSriastradh				      struct kgem_bo,
342003b705cfSriastradh				      list);
342103b705cfSriastradh		if (!bo->rq) {
342203b705cfSriastradhout_16384:
34239a906b70Schristos			assert(bo->refcnt > 0);
342403b705cfSriastradh			list_move_tail(&bo->list, &kgem->pinned_batches[1]);
3425813957e3Ssnj			bo = kgem_bo_reference(bo);
3426813957e3Ssnj			goto write;
342703b705cfSriastradh		}
342803b705cfSriastradh
342903b705cfSriastradh		if (!__kgem_busy(kgem, bo->handle)) {
343003b705cfSriastradh			__kgem_retire_rq(kgem, RQ(bo->rq));
343103b705cfSriastradh			goto out_16384;
343203b705cfSriastradh		}
343303b705cfSriastradh	}
343403b705cfSriastradh
34359a906b70Schristos	if (kgem->gen == 020) {
34369a906b70Schristos		bo = kgem_create_linear(kgem, size, CREATE_CACHED | CREATE_TEMPORARY);
34379a906b70Schristos		if (bo)
3438813957e3Ssnj			goto write;
343903b705cfSriastradh
34409a906b70Schristos		/* Nothing available for reuse, rely on the kernel wa */
34419a906b70Schristos		if (kgem->has_pinned_batches) {
34429a906b70Schristos			bo = kgem_create_linear(kgem, size, CREATE_CACHED | CREATE_TEMPORARY);
34439a906b70Schristos			if (bo) {
34449a906b70Schristos				kgem->batch_flags &= ~LOCAL_I915_EXEC_IS_PINNED;
3445813957e3Ssnj				goto write;
34469a906b70Schristos			}
34479a906b70Schristos		}
34489a906b70Schristos
34499a906b70Schristos		if (size < 16384) {
34509a906b70Schristos			bo = list_first_entry(&kgem->pinned_batches[size > 4096],
34519a906b70Schristos					      struct kgem_bo,
34529a906b70Schristos					      list);
34539a906b70Schristos			list_move_tail(&bo->list, &kgem->pinned_batches[size > 4096]);
345403b705cfSriastradh
34559a906b70Schristos			DBG(("%s: syncing due to busy batches\n", __FUNCTION__));
345603b705cfSriastradh
34579a906b70Schristos			VG_CLEAR(set_domain);
34589a906b70Schristos			set_domain.handle = bo->handle;
34599a906b70Schristos			set_domain.read_domains = I915_GEM_DOMAIN_GTT;
34609a906b70Schristos			set_domain.write_domain = I915_GEM_DOMAIN_GTT;
34619a906b70Schristos			if (do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
34629a906b70Schristos				DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
34639a906b70Schristos				kgem_throttle(kgem);
34649a906b70Schristos				return NULL;
34659a906b70Schristos			}
346603b705cfSriastradh
34679a906b70Schristos			kgem_retire(kgem);
34689a906b70Schristos			assert(bo->rq == NULL);
3469813957e3Ssnj			bo = kgem_bo_reference(bo);
3470813957e3Ssnj			goto write;
34719a906b70Schristos		}
347203b705cfSriastradh	}
347303b705cfSriastradh
3474813957e3Ssnj	bo = NULL;
3475813957e3Ssnj	if (!kgem->has_llc) {
3476813957e3Ssnj		bo = kgem_create_linear(kgem, size, CREATE_NO_THROTTLE);
3477813957e3Ssnj		if (bo) {
3478813957e3Ssnjwrite:
3479813957e3Ssnj			kgem_fixup_relocs(kgem, bo, shrink);
3480813957e3Ssnj			if (kgem_batch_write(kgem, bo, size)) {
3481813957e3Ssnj				kgem_bo_destroy(kgem, bo);
3482813957e3Ssnj				return NULL;
3483813957e3Ssnj			}
3484813957e3Ssnj		}
3485813957e3Ssnj	}
3486813957e3Ssnj	if (bo == NULL)
3487813957e3Ssnj		bo = kgem_new_batch(kgem);
3488813957e3Ssnj	return bo;
3489813957e3Ssnj#else
3490813957e3Ssnj	return kgem_new_batch(kgem);
3491813957e3Ssnj#endif
349203b705cfSriastradh}
349303b705cfSriastradh
34949a906b70Schristos#if !NDEBUG
34959a906b70Schristosstatic bool dump_file(const char *path)
34969a906b70Schristos{
34979a906b70Schristos	FILE *file;
34989a906b70Schristos	size_t len = 0;
34999a906b70Schristos	char *line = NULL;
35009a906b70Schristos
35019a906b70Schristos	file = fopen(path, "r");
35029a906b70Schristos	if (file == NULL)
35039a906b70Schristos		return false;
35049a906b70Schristos
35059a906b70Schristos	while (getline(&line, &len, file) != -1)
35069a906b70Schristos		ErrorF("%s", line);
35079a906b70Schristos
35089a906b70Schristos	free(line);
35099a906b70Schristos	fclose(file);
35109a906b70Schristos	return true;
35119a906b70Schristos}
35129a906b70Schristos
35139a906b70Schristosstatic void dump_debugfs(struct kgem *kgem, const char *name)
35149a906b70Schristos{
35159a906b70Schristos	char path[80];
35169a906b70Schristos	int minor = kgem_get_minor(kgem);
35179a906b70Schristos
35189a906b70Schristos	if (minor < 0)
35199a906b70Schristos		return;
35209a906b70Schristos
35219a906b70Schristos	sprintf(path, "/sys/kernel/debug/dri/%d/%s", minor, name);
35229a906b70Schristos	if (dump_file(path))
35239a906b70Schristos		return;
35249a906b70Schristos
35259a906b70Schristos	sprintf(path, "/debug/dri/%d/%s", minor, name);
35269a906b70Schristos	if (dump_file(path))
35279a906b70Schristos		return;
35289a906b70Schristos}
35299a906b70Schristos
35309a906b70Schristosstatic void dump_gtt_info(struct kgem *kgem)
35319a906b70Schristos{
35329a906b70Schristos	dump_debugfs(kgem, "i915_gem_gtt");
35339a906b70Schristos}
35349a906b70Schristos
35359a906b70Schristosstatic void dump_fence_regs(struct kgem *kgem)
35369a906b70Schristos{
35379a906b70Schristos	dump_debugfs(kgem, "i915_gem_fence_regs");
35389a906b70Schristos}
35399a906b70Schristos#endif
35409a906b70Schristos
35419a906b70Schristosstatic int do_execbuf(struct kgem *kgem, struct drm_i915_gem_execbuffer2 *execbuf)
35429a906b70Schristos{
3543813957e3Ssnj	int ret, err;
35449a906b70Schristos
35459a906b70Schristosretry:
35469a906b70Schristos	ret = do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
35479a906b70Schristos	if (ret == 0)
35489a906b70Schristos		return 0;
35499a906b70Schristos
35509a906b70Schristos	DBG(("%s: failed ret=%d, throttling and discarding cache\n", __FUNCTION__, ret));
35519a906b70Schristos	(void)__kgem_throttle_retire(kgem, 0);
35529a906b70Schristos	if (kgem_expire_cache(kgem))
35539a906b70Schristos		goto retry;
35549a906b70Schristos
35559a906b70Schristos	if (kgem_cleanup_cache(kgem))
35569a906b70Schristos		goto retry;
35579a906b70Schristos
35589a906b70Schristos	/* last gasp */
3559813957e3Ssnj	ret = do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
3560813957e3Ssnj	if (ret == 0)
3561813957e3Ssnj		return 0;
3562813957e3Ssnj
3563813957e3Ssnj	xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING,
3564813957e3Ssnj		   "Failed to submit rendering commands, trying again with outputs disabled.\n");
3565813957e3Ssnj
3566813957e3Ssnj	/* One last trick up our sleeve for when we run out of space.
3567813957e3Ssnj	 * We turn everything off to free up our pinned framebuffers,
3568813957e3Ssnj	 * sprites and cursors, and try one last time.
3569813957e3Ssnj	 */
3570813957e3Ssnj	err = errno;
3571813957e3Ssnj	if (sna_mode_disable(container_of(kgem, struct sna, kgem))) {
3572813957e3Ssnj		kgem_cleanup_cache(kgem);
3573813957e3Ssnj		ret = do_ioctl(kgem->fd,
3574813957e3Ssnj			       DRM_IOCTL_I915_GEM_EXECBUFFER2,
3575813957e3Ssnj			       execbuf);
3576813957e3Ssnj		DBG(("%s: last_gasp ret=%d\n", __FUNCTION__, ret));
3577813957e3Ssnj		sna_mode_enable(container_of(kgem, struct sna, kgem));
3578813957e3Ssnj	}
3579813957e3Ssnj	errno = err;
3580813957e3Ssnj
3581813957e3Ssnj	return ret;
35829a906b70Schristos}
35839a906b70Schristos
358403b705cfSriastradhvoid _kgem_submit(struct kgem *kgem)
358503b705cfSriastradh{
358603b705cfSriastradh	struct kgem_request *rq;
358703b705cfSriastradh	uint32_t batch_end;
358803b705cfSriastradh
358903b705cfSriastradh	assert(!DBG_NO_HW);
359003b705cfSriastradh	assert(!kgem->wedged);
359103b705cfSriastradh
359203b705cfSriastradh	assert(kgem->nbatch);
359303b705cfSriastradh	assert(kgem->nbatch <= KGEM_BATCH_SIZE(kgem));
359403b705cfSriastradh	assert(kgem->nbatch <= kgem->surface);
359503b705cfSriastradh
359603b705cfSriastradh	batch_end = kgem_end_batch(kgem);
359703b705cfSriastradh	kgem_sna_flush(kgem);
359803b705cfSriastradh
35999a906b70Schristos	DBG(("batch[%d/%d, flags=%x]: %d %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d [fenced=%d]\n",
36009a906b70Schristos	     kgem->mode, kgem->ring, kgem->batch_flags,
36019a906b70Schristos	     batch_end, kgem->nbatch, kgem->surface, kgem->batch_size,
36029a906b70Schristos	     kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture, kgem->aperture_fenced));
360303b705cfSriastradh
360403b705cfSriastradh	assert(kgem->nbatch <= kgem->batch_size);
360503b705cfSriastradh	assert(kgem->nbatch <= kgem->surface);
360603b705cfSriastradh	assert(kgem->nreloc <= ARRAY_SIZE(kgem->reloc));
360703b705cfSriastradh	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
360803b705cfSriastradh	assert(kgem->nfence <= kgem->fence_max);
360903b705cfSriastradh
361003b705cfSriastradh	kgem_finish_buffers(kgem);
361103b705cfSriastradh
36129a906b70Schristos#if SHOW_BATCH_BEFORE
361303b705cfSriastradh	__kgem_batch_debug(kgem, batch_end);
361403b705cfSriastradh#endif
361503b705cfSriastradh
361603b705cfSriastradh	rq = kgem->next_request;
3617813957e3Ssnj	assert(rq->bo == NULL);
3618813957e3Ssnj
3619813957e3Ssnj	rq->bo = kgem_create_batch(kgem);
362003b705cfSriastradh	if (rq->bo) {
3621813957e3Ssnj		struct drm_i915_gem_execbuffer2 execbuf;
3622813957e3Ssnj		int i, ret;
362303b705cfSriastradh
362403b705cfSriastradh		assert(!rq->bo->needs_flush);
362503b705cfSriastradh
362603b705cfSriastradh		i = kgem->nexec++;
3627813957e3Ssnj		kgem->exec[i].handle = rq->bo->handle;
362803b705cfSriastradh		kgem->exec[i].relocation_count = kgem->nreloc;
362903b705cfSriastradh		kgem->exec[i].relocs_ptr = (uintptr_t)kgem->reloc;
363003b705cfSriastradh		kgem->exec[i].alignment = 0;
363103b705cfSriastradh		kgem->exec[i].offset = rq->bo->presumed_offset;
363203b705cfSriastradh		kgem->exec[i].flags = 0;
363303b705cfSriastradh		kgem->exec[i].rsvd1 = 0;
363403b705cfSriastradh		kgem->exec[i].rsvd2 = 0;
363503b705cfSriastradh
363603b705cfSriastradh		rq->bo->exec = &kgem->exec[i];
363703b705cfSriastradh		rq->bo->rq = MAKE_REQUEST(rq, kgem->ring); /* useful sanity check */
363803b705cfSriastradh		list_add(&rq->bo->request, &rq->buffers);
363903b705cfSriastradh		rq->ring = kgem->ring == KGEM_BLT;
364003b705cfSriastradh
3641813957e3Ssnj		memset(&execbuf, 0, sizeof(execbuf));
3642813957e3Ssnj		execbuf.buffers_ptr = (uintptr_t)kgem->exec;
3643813957e3Ssnj		execbuf.buffer_count = kgem->nexec;
3644813957e3Ssnj		execbuf.batch_len = batch_end*sizeof(uint32_t);
3645813957e3Ssnj		execbuf.flags = kgem->ring | kgem->batch_flags;
3646813957e3Ssnj
3647813957e3Ssnj		if (DBG_DUMP) {
3648813957e3Ssnj			int fd = open("/tmp/i915-batchbuffers.dump",
3649813957e3Ssnj				      O_WRONLY | O_CREAT | O_APPEND,
3650813957e3Ssnj				      0666);
3651813957e3Ssnj			if (fd != -1) {
3652813957e3Ssnj				ret = write(fd, kgem->batch, batch_end*sizeof(uint32_t));
3653813957e3Ssnj				fd = close(fd);
365403b705cfSriastradh			}
3655813957e3Ssnj		}
365603b705cfSriastradh
3657813957e3Ssnj		ret = do_execbuf(kgem, &execbuf);
3658813957e3Ssnj		if (DEBUG_SYNC && ret == 0) {
3659813957e3Ssnj			struct drm_i915_gem_set_domain set_domain;
366003b705cfSriastradh
3661813957e3Ssnj			VG_CLEAR(set_domain);
3662813957e3Ssnj			set_domain.handle = rq->bo->handle;
3663813957e3Ssnj			set_domain.read_domains = I915_GEM_DOMAIN_GTT;
3664813957e3Ssnj			set_domain.write_domain = I915_GEM_DOMAIN_GTT;
366503b705cfSriastradh
3666813957e3Ssnj			ret = do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
3667813957e3Ssnj		}
3668813957e3Ssnj		if (ret < 0) {
3669813957e3Ssnj			kgem_throttle(kgem);
3670813957e3Ssnj			if (!kgem->wedged) {
3671813957e3Ssnj				xf86DrvMsg(kgem_get_screen_index(kgem), X_ERROR,
3672813957e3Ssnj					   "Failed to submit rendering commands, disabling acceleration.\n");
3673813957e3Ssnj				__kgem_set_wedged(kgem);
367403b705cfSriastradh			}
367503b705cfSriastradh
367603b705cfSriastradh#if !NDEBUG
3677813957e3Ssnj			ErrorF("batch[%d/%d]: %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d, fenced=%d, high=%d,%d: errno=%d\n",
3678813957e3Ssnj			       kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface,
3679813957e3Ssnj			       kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture, kgem->aperture_fenced, kgem->aperture_high, kgem->aperture_total, -ret);
368003b705cfSriastradh
3681813957e3Ssnj			for (i = 0; i < kgem->nexec; i++) {
3682813957e3Ssnj				struct kgem_bo *bo, *found = NULL;
368303b705cfSriastradh
3684813957e3Ssnj				list_for_each_entry(bo, &kgem->next_request->buffers, request) {
3685813957e3Ssnj					if (bo->handle == kgem->exec[i].handle) {
3686813957e3Ssnj						found = bo;
3687813957e3Ssnj						break;
368803b705cfSriastradh					}
368903b705cfSriastradh				}
3690813957e3Ssnj				ErrorF("exec[%d] = handle:%d, presumed offset: %x, size: %d, tiling %d, fenced %d, snooped %d, deleted %d\n",
3691813957e3Ssnj				       i,
3692813957e3Ssnj				       kgem->exec[i].handle,
3693813957e3Ssnj				       (int)kgem->exec[i].offset,
3694813957e3Ssnj				       found ? kgem_bo_size(found) : -1,
3695813957e3Ssnj				       found ? found->tiling : -1,
3696813957e3Ssnj				       (int)(kgem->exec[i].flags & EXEC_OBJECT_NEEDS_FENCE),
3697813957e3Ssnj				       found ? found->snoop : -1,
3698813957e3Ssnj				       found ? found->purged : -1);
3699813957e3Ssnj			}
3700813957e3Ssnj			for (i = 0; i < kgem->nreloc; i++) {
3701813957e3Ssnj				ErrorF("reloc[%d] = pos:%d, target:%d, delta:%d, read:%x, write:%x, offset:%x\n",
3702813957e3Ssnj				       i,
3703813957e3Ssnj				       (int)kgem->reloc[i].offset,
3704813957e3Ssnj				       kgem->reloc[i].target_handle,
3705813957e3Ssnj				       kgem->reloc[i].delta,
3706813957e3Ssnj				       kgem->reloc[i].read_domains,
3707813957e3Ssnj				       kgem->reloc[i].write_domain,
3708813957e3Ssnj				       (int)kgem->reloc[i].presumed_offset);
3709813957e3Ssnj			}
371003b705cfSriastradh
3711813957e3Ssnj			{
3712813957e3Ssnj				struct drm_i915_gem_get_aperture aperture;
3713813957e3Ssnj				if (do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture) == 0)
3714813957e3Ssnj					ErrorF("Aperture size %lld, available %lld\n",
3715813957e3Ssnj					       (long long)aperture.aper_size,
3716813957e3Ssnj					       (long long)aperture.aper_available_size);
3717813957e3Ssnj			}
37189a906b70Schristos
3719813957e3Ssnj			if (ret == -ENOSPC)
3720813957e3Ssnj				dump_gtt_info(kgem);
3721813957e3Ssnj			if (ret == -EDEADLK)
3722813957e3Ssnj				dump_fence_regs(kgem);
372303b705cfSriastradh
3724813957e3Ssnj			if (DEBUG_SYNC) {
3725813957e3Ssnj				int fd = open("/tmp/batchbuffer", O_WRONLY | O_CREAT | O_APPEND, 0666);
3726813957e3Ssnj				if (fd != -1) {
3727813957e3Ssnj					int ignored = write(fd, kgem->batch, batch_end*sizeof(uint32_t));
3728813957e3Ssnj					assert(ignored == batch_end*sizeof(uint32_t));
3729813957e3Ssnj					close(fd);
373003b705cfSriastradh				}
3731813957e3Ssnj
3732813957e3Ssnj				FatalError("SNA: failed to submit batchbuffer, errno=%d\n", -ret);
373303b705cfSriastradh			}
3734813957e3Ssnj#endif
373503b705cfSriastradh		}
373603b705cfSriastradh	}
37379a906b70Schristos#if SHOW_BATCH_AFTER
3738813957e3Ssnj	if (gem_read(kgem->fd, rq->bo->handle, kgem->batch, 0, batch_end*sizeof(uint32_t)) == 0)
37399a906b70Schristos		__kgem_batch_debug(kgem, batch_end);
37409a906b70Schristos#endif
37419a906b70Schristos	kgem_commit(kgem);
374203b705cfSriastradh	if (kgem->wedged)
374303b705cfSriastradh		kgem_cleanup(kgem);
374403b705cfSriastradh
374503b705cfSriastradh	kgem_reset(kgem);
374603b705cfSriastradh
374703b705cfSriastradh	assert(kgem->next_request != NULL);
374803b705cfSriastradh}
374903b705cfSriastradh
37509a906b70Schristosstatic bool find_hang_state(struct kgem *kgem, char *path, int maxlen)
375103b705cfSriastradh{
37529a906b70Schristos	int minor = kgem_get_minor(kgem);
375303b705cfSriastradh
375403b705cfSriastradh	/* Search for our hang state in a few canonical locations.
375503b705cfSriastradh	 * In the unlikely event of having multiple devices, we
375603b705cfSriastradh	 * will need to check which minor actually corresponds to ours.
375703b705cfSriastradh	 */
375803b705cfSriastradh
37599a906b70Schristos	snprintf(path, maxlen, "/sys/class/drm/card%d/error", minor);
37609a906b70Schristos	if (access(path, R_OK) == 0)
37619a906b70Schristos		return true;
376203b705cfSriastradh
37639a906b70Schristos	snprintf(path, maxlen, "/sys/kernel/debug/dri/%d/i915_error_state", minor);
37649a906b70Schristos	if (access(path, R_OK) == 0)
37659a906b70Schristos		return true;
376603b705cfSriastradh
37679a906b70Schristos	snprintf(path, maxlen, "/debug/dri/%d/i915_error_state", minor);
37689a906b70Schristos	if (access(path, R_OK) == 0)
37699a906b70Schristos		return true;
377003b705cfSriastradh
377103b705cfSriastradh	path[0] = '\0';
37729a906b70Schristos	return false;
377303b705cfSriastradh}
377403b705cfSriastradh
377503b705cfSriastradhvoid kgem_throttle(struct kgem *kgem)
377603b705cfSriastradh{
377703b705cfSriastradh	if (kgem->wedged)
377803b705cfSriastradh		return;
377903b705cfSriastradh
3780813957e3Ssnj	if (__kgem_throttle(kgem, true)) {
37819a906b70Schristos		static int once;
378203b705cfSriastradh		char path[128];
378303b705cfSriastradh
378403b705cfSriastradh		xf86DrvMsg(kgem_get_screen_index(kgem), X_ERROR,
378503b705cfSriastradh			   "Detected a hung GPU, disabling acceleration.\n");
37869a906b70Schristos		if (!once && find_hang_state(kgem, path, sizeof(path))) {
378703b705cfSriastradh			xf86DrvMsg(kgem_get_screen_index(kgem), X_ERROR,
378803b705cfSriastradh				   "When reporting this, please include %s and the full dmesg.\n",
378903b705cfSriastradh				   path);
37909a906b70Schristos			once = 1;
37919a906b70Schristos		}
37929a906b70Schristos
3793813957e3Ssnj		__kgem_set_wedged(kgem);
37949a906b70Schristos		kgem->need_throttle = false;
379503b705cfSriastradh	}
379603b705cfSriastradh}
379703b705cfSriastradh
37989a906b70Schristosint kgem_is_wedged(struct kgem *kgem)
37999a906b70Schristos{
38009a906b70Schristos	return __kgem_throttle(kgem, true);
38019a906b70Schristos}
38029a906b70Schristos
38039a906b70Schristosstatic void kgem_purge_cache(struct kgem *kgem)
380403b705cfSriastradh{
380503b705cfSriastradh	struct kgem_bo *bo, *next;
380603b705cfSriastradh	int i;
380703b705cfSriastradh
380803b705cfSriastradh	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
380903b705cfSriastradh		list_for_each_entry_safe(bo, next, &kgem->inactive[i], list) {
381003b705cfSriastradh			if (!kgem_bo_is_retained(kgem, bo)) {
381103b705cfSriastradh				DBG(("%s: purging %d\n",
381203b705cfSriastradh				     __FUNCTION__, bo->handle));
381303b705cfSriastradh				kgem_bo_free(kgem, bo);
381403b705cfSriastradh			}
381503b705cfSriastradh		}
381603b705cfSriastradh	}
381703b705cfSriastradh
381803b705cfSriastradh	kgem->need_purge = false;
381903b705cfSriastradh}
382003b705cfSriastradh
382103b705cfSriastradhvoid kgem_clean_scanout_cache(struct kgem *kgem)
382203b705cfSriastradh{
382303b705cfSriastradh	while (!list_is_empty(&kgem->scanout)) {
382403b705cfSriastradh		struct kgem_bo *bo;
382503b705cfSriastradh
382603b705cfSriastradh		bo = list_first_entry(&kgem->scanout, struct kgem_bo, list);
382703b705cfSriastradh
382803b705cfSriastradh		assert(bo->scanout);
382903b705cfSriastradh		assert(!bo->refcnt);
38309a906b70Schristos		assert(!bo->prime);
383103b705cfSriastradh		assert(bo->proxy == NULL);
383203b705cfSriastradh
383303b705cfSriastradh		if (bo->exec || __kgem_busy(kgem, bo->handle))
383403b705cfSriastradh			break;
383503b705cfSriastradh
383603b705cfSriastradh		DBG(("%s: handle=%d, fb=%d (reusable=%d)\n",
383703b705cfSriastradh		     __FUNCTION__, bo->handle, bo->delta, bo->reusable));
383803b705cfSriastradh		list_del(&bo->list);
383903b705cfSriastradh
38409a906b70Schristos		kgem_bo_rmfb(kgem, bo);
384103b705cfSriastradh		bo->scanout = false;
384203b705cfSriastradh
384303b705cfSriastradh		if (!bo->purged) {
384403b705cfSriastradh			bo->reusable = true;
384503b705cfSriastradh			if (kgem->has_llc &&
384603b705cfSriastradh			    !gem_set_caching(kgem->fd, bo->handle, SNOOPED))
384703b705cfSriastradh				bo->reusable = false;
384803b705cfSriastradh
384903b705cfSriastradh		}
385003b705cfSriastradh
385103b705cfSriastradh		__kgem_bo_destroy(kgem, bo);
385203b705cfSriastradh	}
385303b705cfSriastradh}
385403b705cfSriastradh
385503b705cfSriastradhvoid kgem_clean_large_cache(struct kgem *kgem)
385603b705cfSriastradh{
385703b705cfSriastradh	while (!list_is_empty(&kgem->large_inactive)) {
385803b705cfSriastradh		kgem_bo_free(kgem,
385903b705cfSriastradh			     list_first_entry(&kgem->large_inactive,
386003b705cfSriastradh					      struct kgem_bo, list));
386103b705cfSriastradh
386203b705cfSriastradh	}
386303b705cfSriastradh}
386403b705cfSriastradh
386503b705cfSriastradhbool kgem_expire_cache(struct kgem *kgem)
386603b705cfSriastradh{
386703b705cfSriastradh	time_t now, expire;
386803b705cfSriastradh	struct kgem_bo *bo;
386903b705cfSriastradh	unsigned int size = 0, count = 0;
387003b705cfSriastradh	bool idle;
387103b705cfSriastradh	unsigned int i;
387203b705cfSriastradh
387303b705cfSriastradh	time(&now);
387403b705cfSriastradh
387503b705cfSriastradh	while (__kgem_freed_bo) {
387603b705cfSriastradh		bo = __kgem_freed_bo;
387703b705cfSriastradh		__kgem_freed_bo = *(struct kgem_bo **)bo;
387803b705cfSriastradh		free(bo);
387903b705cfSriastradh	}
388003b705cfSriastradh
388103b705cfSriastradh	while (__kgem_freed_request) {
388203b705cfSriastradh		struct kgem_request *rq = __kgem_freed_request;
388303b705cfSriastradh		__kgem_freed_request = *(struct kgem_request **)rq;
388403b705cfSriastradh		free(rq);
388503b705cfSriastradh	}
388603b705cfSriastradh
388703b705cfSriastradh	kgem_clean_large_cache(kgem);
388803b705cfSriastradh	if (container_of(kgem, struct sna, kgem)->scrn->vtSema)
388903b705cfSriastradh		kgem_clean_scanout_cache(kgem);
389003b705cfSriastradh
389103b705cfSriastradh	expire = 0;
389203b705cfSriastradh	list_for_each_entry(bo, &kgem->snoop, list) {
389303b705cfSriastradh		if (bo->delta) {
389403b705cfSriastradh			expire = now - MAX_INACTIVE_TIME/2;
389503b705cfSriastradh			break;
389603b705cfSriastradh		}
389703b705cfSriastradh
389803b705cfSriastradh		bo->delta = now;
389903b705cfSriastradh	}
390003b705cfSriastradh	if (expire) {
390103b705cfSriastradh		while (!list_is_empty(&kgem->snoop)) {
390203b705cfSriastradh			bo = list_last_entry(&kgem->snoop, struct kgem_bo, list);
390303b705cfSriastradh
390403b705cfSriastradh			if (bo->delta > expire)
390503b705cfSriastradh				break;
390603b705cfSriastradh
390703b705cfSriastradh			kgem_bo_free(kgem, bo);
390803b705cfSriastradh		}
390903b705cfSriastradh	}
391003b705cfSriastradh#ifdef DEBUG_MEMORY
391103b705cfSriastradh	{
391203b705cfSriastradh		long snoop_size = 0;
391303b705cfSriastradh		int snoop_count = 0;
391403b705cfSriastradh		list_for_each_entry(bo, &kgem->snoop, list)
391503b705cfSriastradh			snoop_count++, snoop_size += bytes(bo);
39169a906b70Schristos		DBG(("%s: still allocated %d bo, %ld bytes, in snoop cache\n",
39179a906b70Schristos		     __FUNCTION__, snoop_count, snoop_size));
391803b705cfSriastradh	}
391903b705cfSriastradh#endif
392003b705cfSriastradh
392103b705cfSriastradh	kgem_retire(kgem);
392203b705cfSriastradh	if (kgem->wedged)
392303b705cfSriastradh		kgem_cleanup(kgem);
392403b705cfSriastradh
392503b705cfSriastradh	kgem->expire(kgem);
392603b705cfSriastradh
392703b705cfSriastradh	if (kgem->need_purge)
392803b705cfSriastradh		kgem_purge_cache(kgem);
392903b705cfSriastradh
39309a906b70Schristos	if (kgem->need_retire)
39319a906b70Schristos		kgem_retire(kgem);
393203b705cfSriastradh
39339a906b70Schristos	expire = 0;
39349a906b70Schristos	idle = true;
393503b705cfSriastradh	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
393603b705cfSriastradh		idle &= list_is_empty(&kgem->inactive[i]);
393703b705cfSriastradh		list_for_each_entry(bo, &kgem->inactive[i], list) {
393803b705cfSriastradh			if (bo->delta) {
393903b705cfSriastradh				expire = now - MAX_INACTIVE_TIME;
394003b705cfSriastradh				break;
394103b705cfSriastradh			}
394203b705cfSriastradh
394303b705cfSriastradh			bo->delta = now;
394403b705cfSriastradh		}
394503b705cfSriastradh	}
39469a906b70Schristos	if (expire == 0) {
39479a906b70Schristos		DBG(("%s: idle? %d\n", __FUNCTION__, idle));
39489a906b70Schristos		kgem->need_expire = !idle;
394903b705cfSriastradh		return false;
395003b705cfSriastradh	}
395103b705cfSriastradh
39529a906b70Schristos	idle = true;
395303b705cfSriastradh	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
395403b705cfSriastradh		struct list preserve;
395503b705cfSriastradh
395603b705cfSriastradh		list_init(&preserve);
395703b705cfSriastradh		while (!list_is_empty(&kgem->inactive[i])) {
395803b705cfSriastradh			bo = list_last_entry(&kgem->inactive[i],
395903b705cfSriastradh					     struct kgem_bo, list);
396003b705cfSriastradh
396103b705cfSriastradh			if (bo->delta > expire) {
396203b705cfSriastradh				idle = false;
396303b705cfSriastradh				break;
396403b705cfSriastradh			}
396503b705cfSriastradh
39669a906b70Schristos			if (bo->map__cpu && bo->delta + MAP_PRESERVE_TIME > expire) {
396703b705cfSriastradh				idle = false;
396803b705cfSriastradh				list_move_tail(&bo->list, &preserve);
396903b705cfSriastradh			} else {
397003b705cfSriastradh				count++;
397103b705cfSriastradh				size += bytes(bo);
397203b705cfSriastradh				kgem_bo_free(kgem, bo);
397303b705cfSriastradh				DBG(("%s: expiring %d\n",
397403b705cfSriastradh				     __FUNCTION__, bo->handle));
397503b705cfSriastradh			}
397603b705cfSriastradh		}
397703b705cfSriastradh		if (!list_is_empty(&preserve)) {
397803b705cfSriastradh			preserve.prev->next = kgem->inactive[i].next;
397903b705cfSriastradh			kgem->inactive[i].next->prev = preserve.prev;
398003b705cfSriastradh			kgem->inactive[i].next = preserve.next;
398103b705cfSriastradh			preserve.next->prev = &kgem->inactive[i];
398203b705cfSriastradh		}
398303b705cfSriastradh	}
398403b705cfSriastradh
398503b705cfSriastradh#ifdef DEBUG_MEMORY
398603b705cfSriastradh	{
398703b705cfSriastradh		long inactive_size = 0;
398803b705cfSriastradh		int inactive_count = 0;
398903b705cfSriastradh		for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
399003b705cfSriastradh			list_for_each_entry(bo, &kgem->inactive[i], list)
399103b705cfSriastradh				inactive_count++, inactive_size += bytes(bo);
39929a906b70Schristos		DBG(("%s: still allocated %d bo, %ld bytes, in inactive cache\n",
39939a906b70Schristos		     __FUNCTION__, inactive_count, inactive_size));
399403b705cfSriastradh	}
399503b705cfSriastradh#endif
399603b705cfSriastradh
399703b705cfSriastradh	DBG(("%s: expired %d objects, %d bytes, idle? %d\n",
399803b705cfSriastradh	     __FUNCTION__, count, size, idle));
399903b705cfSriastradh
400003b705cfSriastradh	kgem->need_expire = !idle;
40019a906b70Schristos	return count;
400203b705cfSriastradh	(void)count;
400303b705cfSriastradh	(void)size;
400403b705cfSriastradh}
400503b705cfSriastradh
40069a906b70Schristosbool kgem_cleanup_cache(struct kgem *kgem)
400703b705cfSriastradh{
400803b705cfSriastradh	unsigned int i;
400903b705cfSriastradh	int n;
401003b705cfSriastradh
401103b705cfSriastradh	/* sync to the most recent request */
401203b705cfSriastradh	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
401303b705cfSriastradh		if (!list_is_empty(&kgem->requests[n])) {
401403b705cfSriastradh			struct kgem_request *rq;
401503b705cfSriastradh			struct drm_i915_gem_set_domain set_domain;
401603b705cfSriastradh
401703b705cfSriastradh			rq = list_first_entry(&kgem->requests[n],
401803b705cfSriastradh					      struct kgem_request,
401903b705cfSriastradh					      list);
402003b705cfSriastradh
402103b705cfSriastradh			DBG(("%s: sync on cleanup\n", __FUNCTION__));
402203b705cfSriastradh
402303b705cfSriastradh			VG_CLEAR(set_domain);
402403b705cfSriastradh			set_domain.handle = rq->bo->handle;
402503b705cfSriastradh			set_domain.read_domains = I915_GEM_DOMAIN_GTT;
402603b705cfSriastradh			set_domain.write_domain = I915_GEM_DOMAIN_GTT;
40279a906b70Schristos			(void)do_ioctl(kgem->fd,
402803b705cfSriastradh				       DRM_IOCTL_I915_GEM_SET_DOMAIN,
402903b705cfSriastradh				       &set_domain);
403003b705cfSriastradh		}
403103b705cfSriastradh	}
403203b705cfSriastradh
403303b705cfSriastradh	kgem_retire(kgem);
403403b705cfSriastradh	kgem_cleanup(kgem);
403503b705cfSriastradh
40369a906b70Schristos	if (!kgem->need_expire)
40379a906b70Schristos		return false;
40389a906b70Schristos
403903b705cfSriastradh	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
404003b705cfSriastradh		while (!list_is_empty(&kgem->inactive[i]))
404103b705cfSriastradh			kgem_bo_free(kgem,
404203b705cfSriastradh				     list_last_entry(&kgem->inactive[i],
404303b705cfSriastradh						     struct kgem_bo, list));
404403b705cfSriastradh	}
404503b705cfSriastradh
404603b705cfSriastradh	kgem_clean_large_cache(kgem);
404703b705cfSriastradh	kgem_clean_scanout_cache(kgem);
404803b705cfSriastradh
404903b705cfSriastradh	while (!list_is_empty(&kgem->snoop))
405003b705cfSriastradh		kgem_bo_free(kgem,
405103b705cfSriastradh			     list_last_entry(&kgem->snoop,
405203b705cfSriastradh					     struct kgem_bo, list));
405303b705cfSriastradh
405403b705cfSriastradh	while (__kgem_freed_bo) {
405503b705cfSriastradh		struct kgem_bo *bo = __kgem_freed_bo;
405603b705cfSriastradh		__kgem_freed_bo = *(struct kgem_bo **)bo;
405703b705cfSriastradh		free(bo);
405803b705cfSriastradh	}
405903b705cfSriastradh
406003b705cfSriastradh	kgem->need_purge = false;
406103b705cfSriastradh	kgem->need_expire = false;
40629a906b70Schristos	return true;
406303b705cfSriastradh}
406403b705cfSriastradh
406503b705cfSriastradhstatic struct kgem_bo *
406603b705cfSriastradhsearch_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
406703b705cfSriastradh{
406803b705cfSriastradh	struct kgem_bo *bo, *first = NULL;
406903b705cfSriastradh	bool use_active = (flags & CREATE_INACTIVE) == 0;
407003b705cfSriastradh	struct list *cache;
407103b705cfSriastradh
407203b705cfSriastradh	DBG(("%s: num_pages=%d, flags=%x, use_active? %d, use_large=%d [max=%d]\n",
407303b705cfSriastradh	     __FUNCTION__, num_pages, flags, use_active,
407403b705cfSriastradh	     num_pages >= MAX_CACHE_SIZE / PAGE_SIZE,
407503b705cfSriastradh	     MAX_CACHE_SIZE / PAGE_SIZE));
407603b705cfSriastradh
407703b705cfSriastradh	assert(num_pages);
407803b705cfSriastradh
407903b705cfSriastradh	if (num_pages >= MAX_CACHE_SIZE / PAGE_SIZE) {
408003b705cfSriastradh		DBG(("%s: searching large buffers\n", __FUNCTION__));
408103b705cfSriastradhretry_large:
408203b705cfSriastradh		cache = use_active ? &kgem->large : &kgem->large_inactive;
408303b705cfSriastradh		list_for_each_entry_safe(bo, first, cache, list) {
408403b705cfSriastradh			assert(bo->refcnt == 0);
408503b705cfSriastradh			assert(bo->reusable);
408603b705cfSriastradh			assert(!bo->scanout);
408703b705cfSriastradh
408803b705cfSriastradh			if (num_pages > num_pages(bo))
408903b705cfSriastradh				goto discard;
409003b705cfSriastradh
409103b705cfSriastradh			if (bo->tiling != I915_TILING_NONE) {
409203b705cfSriastradh				if (use_active)
409303b705cfSriastradh					goto discard;
409403b705cfSriastradh
409503b705cfSriastradh				if (!gem_set_tiling(kgem->fd, bo->handle,
409603b705cfSriastradh						    I915_TILING_NONE, 0))
409703b705cfSriastradh					goto discard;
409803b705cfSriastradh
409903b705cfSriastradh				bo->tiling = I915_TILING_NONE;
410003b705cfSriastradh				bo->pitch = 0;
410103b705cfSriastradh			}
410203b705cfSriastradh
410303b705cfSriastradh			if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo))
410403b705cfSriastradh				goto discard;
410503b705cfSriastradh
410603b705cfSriastradh			list_del(&bo->list);
41079a906b70Schristos			if (RQ(bo->rq) == (void *)kgem) {
41089a906b70Schristos				assert(bo->exec == NULL);
410903b705cfSriastradh				list_del(&bo->request);
41109a906b70Schristos			}
411103b705cfSriastradh
411203b705cfSriastradh			bo->delta = 0;
411303b705cfSriastradh			assert_tiling(kgem, bo);
411403b705cfSriastradh			return bo;
411503b705cfSriastradh
411603b705cfSriastradhdiscard:
411703b705cfSriastradh			if (!use_active)
411803b705cfSriastradh				kgem_bo_free(kgem, bo);
411903b705cfSriastradh		}
412003b705cfSriastradh
412103b705cfSriastradh		if (use_active) {
412203b705cfSriastradh			use_active = false;
412303b705cfSriastradh			goto retry_large;
412403b705cfSriastradh		}
412503b705cfSriastradh
412603b705cfSriastradh		if (__kgem_throttle_retire(kgem, flags))
412703b705cfSriastradh			goto retry_large;
412803b705cfSriastradh
412903b705cfSriastradh		return NULL;
413003b705cfSriastradh	}
413103b705cfSriastradh
413203b705cfSriastradh	if (!use_active && list_is_empty(inactive(kgem, num_pages))) {
413303b705cfSriastradh		DBG(("%s: inactive and cache bucket empty\n",
413403b705cfSriastradh		     __FUNCTION__));
413503b705cfSriastradh
413603b705cfSriastradh		if (flags & CREATE_NO_RETIRE) {
413703b705cfSriastradh			DBG(("%s: can not retire\n", __FUNCTION__));
413803b705cfSriastradh			return NULL;
413903b705cfSriastradh		}
414003b705cfSriastradh
414103b705cfSriastradh		if (list_is_empty(active(kgem, num_pages, I915_TILING_NONE))) {
414203b705cfSriastradh			DBG(("%s: active cache bucket empty\n", __FUNCTION__));
414303b705cfSriastradh			return NULL;
414403b705cfSriastradh		}
414503b705cfSriastradh
414603b705cfSriastradh		if (!__kgem_throttle_retire(kgem, flags)) {
414703b705cfSriastradh			DBG(("%s: nothing retired\n", __FUNCTION__));
414803b705cfSriastradh			return NULL;
414903b705cfSriastradh		}
415003b705cfSriastradh
415103b705cfSriastradh		if (list_is_empty(inactive(kgem, num_pages))) {
415203b705cfSriastradh			DBG(("%s: active cache bucket still empty after retire\n",
415303b705cfSriastradh			     __FUNCTION__));
415403b705cfSriastradh			return NULL;
415503b705cfSriastradh		}
415603b705cfSriastradh	}
415703b705cfSriastradh
415803b705cfSriastradh	if (!use_active && flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
415903b705cfSriastradh		int for_cpu = !!(flags & CREATE_CPU_MAP);
416003b705cfSriastradh		DBG(("%s: searching for inactive %s map\n",
416103b705cfSriastradh		     __FUNCTION__, for_cpu ? "cpu" : "gtt"));
416203b705cfSriastradh		cache = &kgem->vma[for_cpu].inactive[cache_bucket(num_pages)];
416303b705cfSriastradh		list_for_each_entry(bo, cache, vma) {
4164813957e3Ssnj			assert(for_cpu ? !!bo->map__cpu : (bo->map__gtt || bo->map__wc));
416503b705cfSriastradh			assert(bucket(bo) == cache_bucket(num_pages));
416603b705cfSriastradh			assert(bo->proxy == NULL);
416703b705cfSriastradh			assert(bo->rq == NULL);
416803b705cfSriastradh			assert(bo->exec == NULL);
416903b705cfSriastradh			assert(!bo->scanout);
417003b705cfSriastradh
417103b705cfSriastradh			if (num_pages > num_pages(bo)) {
417203b705cfSriastradh				DBG(("inactive too small: %d < %d\n",
417303b705cfSriastradh				     num_pages(bo), num_pages));
417403b705cfSriastradh				continue;
417503b705cfSriastradh			}
417603b705cfSriastradh
417703b705cfSriastradh			if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
417803b705cfSriastradh				kgem_bo_free(kgem, bo);
417903b705cfSriastradh				break;
418003b705cfSriastradh			}
418103b705cfSriastradh
418203b705cfSriastradh			if (I915_TILING_NONE != bo->tiling &&
418303b705cfSriastradh			    !gem_set_tiling(kgem->fd, bo->handle,
418403b705cfSriastradh					    I915_TILING_NONE, 0))
418503b705cfSriastradh				continue;
418603b705cfSriastradh
418703b705cfSriastradh			kgem_bo_remove_from_inactive(kgem, bo);
41889a906b70Schristos			assert(list_is_empty(&bo->vma));
41899a906b70Schristos			assert(list_is_empty(&bo->list));
419003b705cfSriastradh
419103b705cfSriastradh			bo->tiling = I915_TILING_NONE;
419203b705cfSriastradh			bo->pitch = 0;
419303b705cfSriastradh			bo->delta = 0;
419403b705cfSriastradh			DBG(("  %s: found handle=%d (num_pages=%d) in linear vma cache\n",
419503b705cfSriastradh			     __FUNCTION__, bo->handle, num_pages(bo)));
419603b705cfSriastradh			assert(use_active || bo->domain != DOMAIN_GPU);
419703b705cfSriastradh			assert(!bo->needs_flush);
419803b705cfSriastradh			assert_tiling(kgem, bo);
419903b705cfSriastradh			ASSERT_MAYBE_IDLE(kgem, bo->handle, !use_active);
420003b705cfSriastradh			return bo;
420103b705cfSriastradh		}
420203b705cfSriastradh
420303b705cfSriastradh		if (flags & CREATE_EXACT)
420403b705cfSriastradh			return NULL;
420503b705cfSriastradh
420603b705cfSriastradh		if (flags & CREATE_CPU_MAP && !kgem->has_llc)
420703b705cfSriastradh			return NULL;
420803b705cfSriastradh	}
420903b705cfSriastradh
421003b705cfSriastradh	cache = use_active ? active(kgem, num_pages, I915_TILING_NONE) : inactive(kgem, num_pages);
421103b705cfSriastradh	list_for_each_entry(bo, cache, list) {
421203b705cfSriastradh		assert(bo->refcnt == 0);
421303b705cfSriastradh		assert(bo->reusable);
421403b705cfSriastradh		assert(!!bo->rq == !!use_active);
421503b705cfSriastradh		assert(bo->proxy == NULL);
421603b705cfSriastradh		assert(!bo->scanout);
421703b705cfSriastradh
421803b705cfSriastradh		if (num_pages > num_pages(bo))
421903b705cfSriastradh			continue;
422003b705cfSriastradh
422103b705cfSriastradh		if (use_active &&
422203b705cfSriastradh		    kgem->gen <= 040 &&
422303b705cfSriastradh		    bo->tiling != I915_TILING_NONE)
422403b705cfSriastradh			continue;
422503b705cfSriastradh
422603b705cfSriastradh		if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
422703b705cfSriastradh			kgem_bo_free(kgem, bo);
422803b705cfSriastradh			break;
422903b705cfSriastradh		}
423003b705cfSriastradh
423103b705cfSriastradh		if (I915_TILING_NONE != bo->tiling) {
423203b705cfSriastradh			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP))
423303b705cfSriastradh				continue;
423403b705cfSriastradh
423503b705cfSriastradh			if (first)
423603b705cfSriastradh				continue;
423703b705cfSriastradh
423803b705cfSriastradh			if (!gem_set_tiling(kgem->fd, bo->handle,
423903b705cfSriastradh					    I915_TILING_NONE, 0))
424003b705cfSriastradh				continue;
424103b705cfSriastradh
424203b705cfSriastradh			bo->tiling = I915_TILING_NONE;
424303b705cfSriastradh			bo->pitch = 0;
424403b705cfSriastradh		}
424503b705cfSriastradh
4246813957e3Ssnj		if (bo->map__gtt || bo->map__wc || bo->map__cpu) {
424703b705cfSriastradh			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
424803b705cfSriastradh				int for_cpu = !!(flags & CREATE_CPU_MAP);
4249813957e3Ssnj				if (for_cpu ? !!bo->map__cpu : (bo->map__gtt || bo->map__wc)){
425003b705cfSriastradh					if (first != NULL)
425103b705cfSriastradh						break;
425203b705cfSriastradh
425303b705cfSriastradh					first = bo;
425403b705cfSriastradh					continue;
425503b705cfSriastradh				}
425603b705cfSriastradh			} else {
425703b705cfSriastradh				if (first != NULL)
425803b705cfSriastradh					break;
425903b705cfSriastradh
426003b705cfSriastradh				first = bo;
426103b705cfSriastradh				continue;
426203b705cfSriastradh			}
426303b705cfSriastradh		} else {
42649a906b70Schristos			if (flags & CREATE_GTT_MAP && !kgem_bo_can_map(kgem, bo))
42659a906b70Schristos				continue;
42669a906b70Schristos
426703b705cfSriastradh			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
426803b705cfSriastradh				if (first != NULL)
426903b705cfSriastradh					break;
427003b705cfSriastradh
427103b705cfSriastradh				first = bo;
427203b705cfSriastradh				continue;
427303b705cfSriastradh			}
427403b705cfSriastradh		}
427503b705cfSriastradh
427603b705cfSriastradh		if (use_active)
427703b705cfSriastradh			kgem_bo_remove_from_active(kgem, bo);
427803b705cfSriastradh		else
427903b705cfSriastradh			kgem_bo_remove_from_inactive(kgem, bo);
428003b705cfSriastradh
428103b705cfSriastradh		assert(bo->tiling == I915_TILING_NONE);
428203b705cfSriastradh		bo->pitch = 0;
428303b705cfSriastradh		bo->delta = 0;
428403b705cfSriastradh		DBG(("  %s: found handle=%d (num_pages=%d) in linear %s cache\n",
428503b705cfSriastradh		     __FUNCTION__, bo->handle, num_pages(bo),
428603b705cfSriastradh		     use_active ? "active" : "inactive"));
428703b705cfSriastradh		assert(list_is_empty(&bo->list));
42889a906b70Schristos		assert(list_is_empty(&bo->vma));
428903b705cfSriastradh		assert(use_active || bo->domain != DOMAIN_GPU);
429003b705cfSriastradh		assert(!bo->needs_flush || use_active);
429103b705cfSriastradh		assert_tiling(kgem, bo);
429203b705cfSriastradh		ASSERT_MAYBE_IDLE(kgem, bo->handle, !use_active);
429303b705cfSriastradh		return bo;
429403b705cfSriastradh	}
429503b705cfSriastradh
429603b705cfSriastradh	if (first) {
429703b705cfSriastradh		assert(first->tiling == I915_TILING_NONE);
429803b705cfSriastradh
429903b705cfSriastradh		if (use_active)
430003b705cfSriastradh			kgem_bo_remove_from_active(kgem, first);
430103b705cfSriastradh		else
430203b705cfSriastradh			kgem_bo_remove_from_inactive(kgem, first);
430303b705cfSriastradh
430403b705cfSriastradh		first->pitch = 0;
430503b705cfSriastradh		first->delta = 0;
430603b705cfSriastradh		DBG(("  %s: found handle=%d (near-miss) (num_pages=%d) in linear %s cache\n",
430703b705cfSriastradh		     __FUNCTION__, first->handle, num_pages(first),
430803b705cfSriastradh		     use_active ? "active" : "inactive"));
430903b705cfSriastradh		assert(list_is_empty(&first->list));
43109a906b70Schristos		assert(list_is_empty(&first->vma));
431103b705cfSriastradh		assert(use_active || first->domain != DOMAIN_GPU);
431203b705cfSriastradh		assert(!first->needs_flush || use_active);
431303b705cfSriastradh		ASSERT_MAYBE_IDLE(kgem, first->handle, !use_active);
431403b705cfSriastradh		return first;
431503b705cfSriastradh	}
431603b705cfSriastradh
431703b705cfSriastradh	return NULL;
431803b705cfSriastradh}
431903b705cfSriastradh
432003b705cfSriastradhstruct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name)
432103b705cfSriastradh{
432203b705cfSriastradh	struct drm_gem_open open_arg;
43239a906b70Schristos	struct drm_i915_gem_get_tiling tiling;
432403b705cfSriastradh	struct kgem_bo *bo;
432503b705cfSriastradh
432603b705cfSriastradh	DBG(("%s(name=%d)\n", __FUNCTION__, name));
432703b705cfSriastradh
432803b705cfSriastradh	VG_CLEAR(open_arg);
432903b705cfSriastradh	open_arg.name = name;
43309a906b70Schristos	if (do_ioctl(kgem->fd, DRM_IOCTL_GEM_OPEN, &open_arg))
433103b705cfSriastradh		return NULL;
433203b705cfSriastradh
433303b705cfSriastradh	DBG(("%s: new handle=%d\n", __FUNCTION__, open_arg.handle));
43349a906b70Schristos
43359a906b70Schristos	VG_CLEAR(tiling);
43369a906b70Schristos	tiling.handle = open_arg.handle;
43379a906b70Schristos	if (do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &tiling)) {
43389a906b70Schristos		DBG(("%s(name=%d) get-tiling failed, ret=%d\n", __FUNCTION__, name, errno));
43399a906b70Schristos		gem_close(kgem->fd, open_arg.handle);
43409a906b70Schristos		return NULL;
43419a906b70Schristos	}
43429a906b70Schristos
43439a906b70Schristos	DBG(("%s: handle=%d, tiling=%d\n", __FUNCTION__, tiling.handle, tiling.tiling_mode));
43449a906b70Schristos
434503b705cfSriastradh	bo = __kgem_bo_alloc(open_arg.handle, open_arg.size / PAGE_SIZE);
434603b705cfSriastradh	if (bo == NULL) {
434703b705cfSriastradh		gem_close(kgem->fd, open_arg.handle);
434803b705cfSriastradh		return NULL;
434903b705cfSriastradh	}
435003b705cfSriastradh
43519a906b70Schristos	bo->unique_id = kgem_get_unique_id(kgem);
43529a906b70Schristos	bo->tiling = tiling.tiling_mode;
435303b705cfSriastradh	bo->reusable = false;
43549a906b70Schristos	bo->prime = true;
43559a906b70Schristos	bo->purged = true; /* no coherency guarantees */
435603b705cfSriastradh
435703b705cfSriastradh	debug_alloc__bo(kgem, bo);
435803b705cfSriastradh	return bo;
435903b705cfSriastradh}
436003b705cfSriastradh
436103b705cfSriastradhstruct kgem_bo *kgem_create_for_prime(struct kgem *kgem, int name, uint32_t size)
436203b705cfSriastradh{
436303b705cfSriastradh#ifdef DRM_IOCTL_PRIME_FD_TO_HANDLE
436403b705cfSriastradh	struct drm_prime_handle args;
436503b705cfSriastradh	struct drm_i915_gem_get_tiling tiling;
43669a906b70Schristos	struct local_i915_gem_caching caching;
436703b705cfSriastradh	struct kgem_bo *bo;
43689a906b70Schristos	off_t seek;
436903b705cfSriastradh
437003b705cfSriastradh	DBG(("%s(name=%d)\n", __FUNCTION__, name));
437103b705cfSriastradh
437203b705cfSriastradh	VG_CLEAR(args);
437303b705cfSriastradh	args.fd = name;
437403b705cfSriastradh	args.flags = 0;
43759a906b70Schristos	if (do_ioctl(kgem->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args)) {
43769a906b70Schristos		DBG(("%s(name=%d) fd-to-handle failed, ret=%d\n", __FUNCTION__, name, errno));
437703b705cfSriastradh		return NULL;
43789a906b70Schristos	}
437903b705cfSriastradh
438003b705cfSriastradh	VG_CLEAR(tiling);
438103b705cfSriastradh	tiling.handle = args.handle;
43829a906b70Schristos	if (do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &tiling)) {
43839a906b70Schristos		DBG(("%s(name=%d) get-tiling failed, ret=%d\n", __FUNCTION__, name, errno));
438403b705cfSriastradh		gem_close(kgem->fd, args.handle);
438503b705cfSriastradh		return NULL;
438603b705cfSriastradh	}
438703b705cfSriastradh
43889a906b70Schristos	/* Query actual size, overriding specified if available */
43899a906b70Schristos	seek = lseek(args.fd, 0, SEEK_END);
43909a906b70Schristos	DBG(("%s: estimated size=%ld, actual=%lld\n",
43919a906b70Schristos	     __FUNCTION__, (long)size, (long long)seek));
43929a906b70Schristos	if (seek != -1) {
43939a906b70Schristos		if (size > seek) {
43949a906b70Schristos			DBG(("%s(name=%d) estimated required size [%d] is larger than actual [%ld]\n", __FUNCTION__, name, size, (long)seek));
43959a906b70Schristos			gem_close(kgem->fd, args.handle);
43969a906b70Schristos			return NULL;
43979a906b70Schristos		}
43989a906b70Schristos		size = seek;
43999a906b70Schristos	}
44009a906b70Schristos
440103b705cfSriastradh	DBG(("%s: new handle=%d, tiling=%d\n", __FUNCTION__,
440203b705cfSriastradh	     args.handle, tiling.tiling_mode));
440303b705cfSriastradh	bo = __kgem_bo_alloc(args.handle, NUM_PAGES(size));
440403b705cfSriastradh	if (bo == NULL) {
440503b705cfSriastradh		gem_close(kgem->fd, args.handle);
440603b705cfSriastradh		return NULL;
440703b705cfSriastradh	}
440803b705cfSriastradh
44099a906b70Schristos	bo->unique_id = kgem_get_unique_id(kgem);
441003b705cfSriastradh	bo->tiling = tiling.tiling_mode;
441103b705cfSriastradh	bo->reusable = false;
44129a906b70Schristos	bo->prime = true;
44139a906b70Schristos	bo->domain = DOMAIN_NONE;
44149a906b70Schristos
44159a906b70Schristos	/* is this a special bo (e.g. scanout or CPU coherent)? */
44169a906b70Schristos
44179a906b70Schristos	VG_CLEAR(caching);
44189a906b70Schristos	caching.handle = args.handle;
44199a906b70Schristos	caching.caching = kgem->has_llc;
44209a906b70Schristos	(void)drmIoctl(kgem->fd, LOCAL_IOCTL_I915_GEM_GET_CACHING, &caching);
44219a906b70Schristos	DBG(("%s: imported handle=%d has caching %d\n", __FUNCTION__, args.handle, caching.caching));
44229a906b70Schristos	switch (caching.caching) {
44239a906b70Schristos	case 0:
44249a906b70Schristos		if (kgem->has_llc) {
44259a906b70Schristos			DBG(("%s: interpreting handle=%d as a foreign scanout\n",
44269a906b70Schristos			     __FUNCTION__, args.handle));
44279a906b70Schristos			bo->scanout = true;
44289a906b70Schristos		}
44299a906b70Schristos		break;
44309a906b70Schristos	case 1:
44319a906b70Schristos		if (!kgem->has_llc) {
44329a906b70Schristos			DBG(("%s: interpreting handle=%d as a foreign snooped buffer\n",
44339a906b70Schristos			     __FUNCTION__, args.handle));
44349a906b70Schristos			bo->snoop = true;
44359a906b70Schristos			if (bo->tiling) {
44369a906b70Schristos				DBG(("%s: illegal snooped tiled buffer\n", __FUNCTION__));
44379a906b70Schristos				kgem_bo_free(kgem, bo);
44389a906b70Schristos				return NULL;
44399a906b70Schristos			}
44409a906b70Schristos		}
44419a906b70Schristos		break;
44429a906b70Schristos	case 2:
44439a906b70Schristos		DBG(("%s: interpreting handle=%d as a foreign scanout\n",
44449a906b70Schristos		     __FUNCTION__, args.handle));
44459a906b70Schristos		bo->scanout = true;
44469a906b70Schristos		break;
44479a906b70Schristos	}
444803b705cfSriastradh
444903b705cfSriastradh	debug_alloc__bo(kgem, bo);
445003b705cfSriastradh	return bo;
445103b705cfSriastradh#else
445203b705cfSriastradh	return NULL;
445303b705cfSriastradh#endif
445403b705cfSriastradh}
445503b705cfSriastradh
445603b705cfSriastradhint kgem_bo_export_to_prime(struct kgem *kgem, struct kgem_bo *bo)
445703b705cfSriastradh{
445803b705cfSriastradh#if defined(DRM_IOCTL_PRIME_HANDLE_TO_FD) && defined(O_CLOEXEC)
445903b705cfSriastradh	struct drm_prime_handle args;
446003b705cfSriastradh
446103b705cfSriastradh	VG_CLEAR(args);
446203b705cfSriastradh	args.handle = bo->handle;
446303b705cfSriastradh	args.flags = O_CLOEXEC;
446403b705cfSriastradh
44659a906b70Schristos	if (do_ioctl(kgem->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
446603b705cfSriastradh		return -1;
446703b705cfSriastradh
446803b705cfSriastradh	bo->reusable = false;
446903b705cfSriastradh	return args.fd;
447003b705cfSriastradh#else
447103b705cfSriastradh	return -1;
447203b705cfSriastradh#endif
447303b705cfSriastradh}
447403b705cfSriastradh
447503b705cfSriastradhstruct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags)
447603b705cfSriastradh{
447703b705cfSriastradh	struct kgem_bo *bo;
447803b705cfSriastradh	uint32_t handle;
447903b705cfSriastradh
448003b705cfSriastradh	DBG(("%s(%d)\n", __FUNCTION__, size));
448103b705cfSriastradh	assert(size);
448203b705cfSriastradh
448303b705cfSriastradh	if (flags & CREATE_GTT_MAP && kgem->has_llc) {
448403b705cfSriastradh		flags &= ~CREATE_GTT_MAP;
448503b705cfSriastradh		flags |= CREATE_CPU_MAP;
448603b705cfSriastradh	}
448703b705cfSriastradh
448803b705cfSriastradh	size = NUM_PAGES(size);
4489813957e3Ssnj	if ((flags & CREATE_UNCACHED) == 0) {
4490813957e3Ssnj		bo = search_linear_cache(kgem, size, CREATE_INACTIVE | flags);
4491813957e3Ssnj		if (bo) {
4492813957e3Ssnj			assert(bo->domain != DOMAIN_GPU);
4493813957e3Ssnj			ASSERT_IDLE(kgem, bo->handle);
4494813957e3Ssnj			bo->refcnt = 1;
4495813957e3Ssnj			return bo;
4496813957e3Ssnj		}
449703b705cfSriastradh
4498813957e3Ssnj		if (flags & CREATE_CACHED)
4499813957e3Ssnj			return NULL;
4500813957e3Ssnj	}
450103b705cfSriastradh
450203b705cfSriastradh	handle = gem_create(kgem->fd, size);
450303b705cfSriastradh	if (handle == 0)
450403b705cfSriastradh		return NULL;
450503b705cfSriastradh
450603b705cfSriastradh	DBG(("%s: new handle=%d, num_pages=%d\n", __FUNCTION__, handle, size));
450703b705cfSriastradh	bo = __kgem_bo_alloc(handle, size);
450803b705cfSriastradh	if (bo == NULL) {
450903b705cfSriastradh		gem_close(kgem->fd, handle);
451003b705cfSriastradh		return NULL;
451103b705cfSriastradh	}
451203b705cfSriastradh
451303b705cfSriastradh	debug_alloc__bo(kgem, bo);
451403b705cfSriastradh	return bo;
451503b705cfSriastradh}
451603b705cfSriastradh
451703b705cfSriastradhint kgem_choose_tiling(struct kgem *kgem, int tiling, int width, int height, int bpp)
451803b705cfSriastradh{
451903b705cfSriastradh	if (DBG_NO_TILING)
452003b705cfSriastradh		return tiling < 0 ? tiling : I915_TILING_NONE;
452103b705cfSriastradh
452203b705cfSriastradh	if (kgem->gen < 040) {
452303b705cfSriastradh		if (tiling && width * bpp > 8192 * 8) {
452403b705cfSriastradh			DBG(("%s: pitch too large for tliing [%d]\n",
452503b705cfSriastradh			     __FUNCTION__, width*bpp/8));
452603b705cfSriastradh			tiling = I915_TILING_NONE;
452703b705cfSriastradh			goto done;
452803b705cfSriastradh		}
452903b705cfSriastradh	} else {
453003b705cfSriastradh		if (width*bpp > (MAXSHORT-512) * 8) {
453103b705cfSriastradh			if (tiling > 0)
453203b705cfSriastradh				tiling = -tiling;
453303b705cfSriastradh			else if (tiling == 0)
453403b705cfSriastradh				tiling = -I915_TILING_X;
453503b705cfSriastradh			DBG(("%s: large pitch [%d], forcing TILING [%d]\n",
453603b705cfSriastradh			     __FUNCTION__, width*bpp/8, tiling));
453703b705cfSriastradh		} else if (tiling && (width|height) > 8192) {
453803b705cfSriastradh			DBG(("%s: large tiled buffer [%dx%d], forcing TILING_X\n",
453903b705cfSriastradh			     __FUNCTION__, width, height));
454003b705cfSriastradh			tiling = -I915_TILING_X;
454103b705cfSriastradh		}
454203b705cfSriastradh
454303b705cfSriastradh		/* fences limited to 128k (256k on ivb) */
454403b705cfSriastradh		assert(width * bpp <= 128 * 1024 * 8);
454503b705cfSriastradh	}
454603b705cfSriastradh
454703b705cfSriastradh	if (tiling < 0)
454803b705cfSriastradh		return tiling;
454903b705cfSriastradh
45509a906b70Schristos	if (tiling == I915_TILING_Y && !kgem->can_render_y)
45519a906b70Schristos		tiling = I915_TILING_X;
45529a906b70Schristos
455303b705cfSriastradh	if (tiling && (height == 1 || width == 1)) {
455403b705cfSriastradh		DBG(("%s: disabling tiling [%dx%d] for single row/col\n",
455503b705cfSriastradh		     __FUNCTION__,width, height));
455603b705cfSriastradh		tiling = I915_TILING_NONE;
455703b705cfSriastradh		goto done;
455803b705cfSriastradh	}
455903b705cfSriastradh	if (tiling == I915_TILING_Y && height <= 16) {
456003b705cfSriastradh		DBG(("%s: too short [%d] for TILING_Y\n",
456103b705cfSriastradh		     __FUNCTION__,height));
456203b705cfSriastradh		tiling = I915_TILING_X;
456303b705cfSriastradh	}
456403b705cfSriastradh	if (tiling && width * bpp > 8 * (4096 - 64)) {
456503b705cfSriastradh		DBG(("%s: TLB miss between lines %dx%d (pitch=%d), forcing tiling %d\n",
456603b705cfSriastradh		     __FUNCTION__,
456703b705cfSriastradh		     width, height, width*bpp/8,
456803b705cfSriastradh		     tiling));
456903b705cfSriastradh		return -tiling;
457003b705cfSriastradh	}
457103b705cfSriastradh	if (tiling == I915_TILING_X && height < 4) {
457203b705cfSriastradh		DBG(("%s: too short [%d] for TILING_X\n",
457303b705cfSriastradh		     __FUNCTION__, height));
457403b705cfSriastradh		tiling = I915_TILING_NONE;
457503b705cfSriastradh		goto done;
457603b705cfSriastradh	}
457703b705cfSriastradh
45789a906b70Schristos	if (tiling == I915_TILING_X && width * bpp <= 8*512) {
457903b705cfSriastradh		DBG(("%s: too thin [width %d, %d bpp] for TILING_X\n",
458003b705cfSriastradh		     __FUNCTION__, width, bpp));
458103b705cfSriastradh		tiling = I915_TILING_NONE;
458203b705cfSriastradh		goto done;
458303b705cfSriastradh	}
45849a906b70Schristos	if (tiling == I915_TILING_Y && width * bpp < 8*128) {
458503b705cfSriastradh		DBG(("%s: too thin [%d] for TILING_Y\n",
458603b705cfSriastradh		     __FUNCTION__, width));
458703b705cfSriastradh		tiling = I915_TILING_NONE;
458803b705cfSriastradh		goto done;
458903b705cfSriastradh	}
459003b705cfSriastradh
459103b705cfSriastradh	if (tiling && ALIGN(height, 2) * ALIGN(width*bpp, 8*64) <= 4096 * 8) {
459203b705cfSriastradh		DBG(("%s: too small [%d bytes] for TILING_%c\n", __FUNCTION__,
459303b705cfSriastradh		     ALIGN(height, 2) * ALIGN(width*bpp, 8*64) / 8,
459403b705cfSriastradh		     tiling == I915_TILING_X ? 'X' : 'Y'));
459503b705cfSriastradh		tiling = I915_TILING_NONE;
459603b705cfSriastradh		goto done;
459703b705cfSriastradh	}
459803b705cfSriastradh
459903b705cfSriastradh	if (tiling && width * bpp >= 8 * 4096 / 2) {
460003b705cfSriastradh		DBG(("%s: TLB near-miss between lines %dx%d (pitch=%d), forcing tiling %d\n",
460103b705cfSriastradh		     __FUNCTION__,
460203b705cfSriastradh		     width, height, width*bpp/8,
460303b705cfSriastradh		     tiling));
460403b705cfSriastradh		return -tiling;
460503b705cfSriastradh	}
460603b705cfSriastradh
460703b705cfSriastradhdone:
460803b705cfSriastradh	DBG(("%s: %dx%d -> %d\n", __FUNCTION__, width, height, tiling));
460903b705cfSriastradh	return tiling;
461003b705cfSriastradh}
461103b705cfSriastradh
461203b705cfSriastradhstatic int bits_per_pixel(int depth)
461303b705cfSriastradh{
461403b705cfSriastradh	switch (depth) {
461503b705cfSriastradh	case 8: return 8;
461603b705cfSriastradh	case 15:
461703b705cfSriastradh	case 16: return 16;
461803b705cfSriastradh	case 24:
461903b705cfSriastradh	case 30:
462003b705cfSriastradh	case 32: return 32;
462103b705cfSriastradh	default: return 0;
462203b705cfSriastradh	}
462303b705cfSriastradh}
462403b705cfSriastradh
462503b705cfSriastradhunsigned kgem_can_create_2d(struct kgem *kgem,
462603b705cfSriastradh			    int width, int height, int depth)
462703b705cfSriastradh{
462803b705cfSriastradh	uint32_t pitch, size;
462903b705cfSriastradh	unsigned flags = 0;
463003b705cfSriastradh	int tiling;
463103b705cfSriastradh	int bpp;
463203b705cfSriastradh
463303b705cfSriastradh	DBG(("%s: %dx%d @ %d\n", __FUNCTION__, width, height, depth));
463403b705cfSriastradh
463503b705cfSriastradh	bpp = bits_per_pixel(depth);
463603b705cfSriastradh	if (bpp == 0) {
463703b705cfSriastradh		DBG(("%s: unhandled depth %d\n", __FUNCTION__, depth));
463803b705cfSriastradh		return 0;
463903b705cfSriastradh	}
464003b705cfSriastradh
464103b705cfSriastradh	if (width > MAXSHORT || height > MAXSHORT) {
464203b705cfSriastradh		DBG(("%s: unhandled size %dx%d\n",
464303b705cfSriastradh		     __FUNCTION__, width, height));
464403b705cfSriastradh		return 0;
464503b705cfSriastradh	}
464603b705cfSriastradh
464703b705cfSriastradh	size = kgem_surface_size(kgem, false, 0,
464803b705cfSriastradh				 width, height, bpp,
464903b705cfSriastradh				 I915_TILING_NONE, &pitch);
465003b705cfSriastradh	DBG(("%s: untiled size=%d\n", __FUNCTION__, size));
465103b705cfSriastradh	if (size > 0) {
465203b705cfSriastradh		if (size <= kgem->max_cpu_size)
465303b705cfSriastradh			flags |= KGEM_CAN_CREATE_CPU;
46549a906b70Schristos		if (size > 4096 && size <= kgem->max_gpu_size)
465503b705cfSriastradh			flags |= KGEM_CAN_CREATE_GPU;
4656813957e3Ssnj		if (size <= PAGE_SIZE*kgem->aperture_mappable/4 || kgem->has_wc_mmap)
465703b705cfSriastradh			flags |= KGEM_CAN_CREATE_GTT;
465803b705cfSriastradh		if (size > kgem->large_object_size)
465903b705cfSriastradh			flags |= KGEM_CAN_CREATE_LARGE;
466003b705cfSriastradh		if (size > kgem->max_object_size) {
466103b705cfSriastradh			DBG(("%s: too large (untiled) %d > %d\n",
466203b705cfSriastradh			     __FUNCTION__, size, kgem->max_object_size));
466303b705cfSriastradh			return 0;
466403b705cfSriastradh		}
466503b705cfSriastradh	}
466603b705cfSriastradh
466703b705cfSriastradh	tiling = kgem_choose_tiling(kgem, I915_TILING_X,
466803b705cfSriastradh				    width, height, bpp);
466903b705cfSriastradh	if (tiling != I915_TILING_NONE) {
467003b705cfSriastradh		size = kgem_surface_size(kgem, false, 0,
467103b705cfSriastradh					 width, height, bpp, tiling,
467203b705cfSriastradh					 &pitch);
467303b705cfSriastradh		DBG(("%s: tiled[%d] size=%d\n", __FUNCTION__, tiling, size));
467403b705cfSriastradh		if (size > 0 && size <= kgem->max_gpu_size)
46759a906b70Schristos			flags |= KGEM_CAN_CREATE_GPU | KGEM_CAN_CREATE_TILED;
46769a906b70Schristos		if (size > 0 && size <= PAGE_SIZE*kgem->aperture_mappable/4)
467703b705cfSriastradh			flags |= KGEM_CAN_CREATE_GTT;
46789a906b70Schristos		if (size > PAGE_SIZE*kgem->aperture_mappable/4)
46799a906b70Schristos			flags &= ~KGEM_CAN_CREATE_GTT;
468003b705cfSriastradh		if (size > kgem->large_object_size)
468103b705cfSriastradh			flags |= KGEM_CAN_CREATE_LARGE;
468203b705cfSriastradh		if (size > kgem->max_object_size) {
468303b705cfSriastradh			DBG(("%s: too large (tiled) %d > %d\n",
468403b705cfSriastradh			     __FUNCTION__, size, kgem->max_object_size));
468503b705cfSriastradh			return 0;
468603b705cfSriastradh		}
46879a906b70Schristos		if (kgem->gen < 040) {
46889a906b70Schristos			int fence_size = 1024 * 1024;
46899a906b70Schristos			while (fence_size < size)
46909a906b70Schristos				fence_size <<= 1;
46919a906b70Schristos			if (fence_size > kgem->max_gpu_size)
46929a906b70Schristos				flags &= ~KGEM_CAN_CREATE_GPU | KGEM_CAN_CREATE_TILED;
46939a906b70Schristos			if (fence_size > PAGE_SIZE*kgem->aperture_fenceable/4)
46949a906b70Schristos				flags &= ~KGEM_CAN_CREATE_GTT;
46959a906b70Schristos		}
469603b705cfSriastradh	}
469703b705cfSriastradh
469803b705cfSriastradh	return flags;
469903b705cfSriastradh}
470003b705cfSriastradh
470103b705cfSriastradhinline int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo)
470203b705cfSriastradh{
470303b705cfSriastradh	unsigned int size;
470403b705cfSriastradh
470503b705cfSriastradh	assert(bo->tiling);
470603b705cfSriastradh	assert_tiling(kgem, bo);
470703b705cfSriastradh	assert(kgem->gen < 040);
470803b705cfSriastradh
470903b705cfSriastradh	if (kgem->gen < 030)
47109a906b70Schristos		size = 512 * 1024 / PAGE_SIZE;
471103b705cfSriastradh	else
47129a906b70Schristos		size = 1024 * 1024 / PAGE_SIZE;
47139a906b70Schristos	while (size < num_pages(bo))
47149a906b70Schristos		size <<= 1;
471503b705cfSriastradh
471603b705cfSriastradh	return size;
471703b705cfSriastradh}
471803b705cfSriastradh
471903b705cfSriastradhstatic struct kgem_bo *
472003b705cfSriastradh__kgem_bo_create_as_display(struct kgem *kgem, int size, int tiling, int pitch)
472103b705cfSriastradh{
472203b705cfSriastradh	struct local_i915_gem_create2 args;
472303b705cfSriastradh	struct kgem_bo *bo;
472403b705cfSriastradh
472503b705cfSriastradh	if (!kgem->has_create2)
472603b705cfSriastradh		return NULL;
472703b705cfSriastradh
472803b705cfSriastradh	memset(&args, 0, sizeof(args));
472903b705cfSriastradh	args.size = size * PAGE_SIZE;
473003b705cfSriastradh	args.placement = LOCAL_I915_CREATE_PLACEMENT_STOLEN;
473103b705cfSriastradh	args.caching = DISPLAY;
473203b705cfSriastradh	args.tiling_mode = tiling;
473303b705cfSriastradh	args.stride = pitch;
473403b705cfSriastradh
47359a906b70Schristos	if (do_ioctl(kgem->fd, LOCAL_IOCTL_I915_GEM_CREATE2, &args)) {
473603b705cfSriastradh		args.placement = LOCAL_I915_CREATE_PLACEMENT_SYSTEM;
47379a906b70Schristos		if (do_ioctl(kgem->fd, LOCAL_IOCTL_I915_GEM_CREATE2, &args))
473803b705cfSriastradh			return NULL;
473903b705cfSriastradh	}
474003b705cfSriastradh
474103b705cfSriastradh	bo = __kgem_bo_alloc(args.handle, size);
474203b705cfSriastradh	if (bo == NULL) {
474303b705cfSriastradh		gem_close(kgem->fd, args.handle);
474403b705cfSriastradh		return NULL;
474503b705cfSriastradh	}
474603b705cfSriastradh
474703b705cfSriastradh	bo->unique_id = kgem_get_unique_id(kgem);
474803b705cfSriastradh	bo->tiling = tiling;
474903b705cfSriastradh	bo->pitch = pitch;
475003b705cfSriastradh	if (args.placement == LOCAL_I915_CREATE_PLACEMENT_STOLEN) {
475103b705cfSriastradh		bo->purged = true; /* for asserts against CPU access */
475203b705cfSriastradh	}
475303b705cfSriastradh	bo->reusable = false; /* so that unclaimed scanouts are freed */
475403b705cfSriastradh	bo->domain = DOMAIN_NONE;
475503b705cfSriastradh
475603b705cfSriastradh	if (__kgem_busy(kgem, bo->handle)) {
47579a906b70Schristos		assert(bo->exec == NULL);
475803b705cfSriastradh		list_add(&bo->request, &kgem->flushing);
475903b705cfSriastradh		bo->rq = (void *)kgem;
47609a906b70Schristos		kgem->need_retire = true;
476103b705cfSriastradh	}
476203b705cfSriastradh
476303b705cfSriastradh	assert_tiling(kgem, bo);
476403b705cfSriastradh	debug_alloc__bo(kgem, bo);
476503b705cfSriastradh
476603b705cfSriastradh	return bo;
476703b705cfSriastradh}
476803b705cfSriastradh
47699a906b70Schristosstatic void __kgem_bo_make_scanout(struct kgem *kgem,
47709a906b70Schristos				   struct kgem_bo *bo,
47719a906b70Schristos				   int width, int height)
47729a906b70Schristos{
47739a906b70Schristos	ScrnInfoPtr scrn =
47749a906b70Schristos		container_of(kgem, struct sna, kgem)->scrn;
47759a906b70Schristos	struct drm_mode_fb_cmd arg;
47769a906b70Schristos
47779a906b70Schristos	assert(bo->proxy == NULL);
47789a906b70Schristos
47799a906b70Schristos	if (!scrn->vtSema)
47809a906b70Schristos		return;
47819a906b70Schristos
47829a906b70Schristos	DBG(("%s: create fb %dx%d@%d/%d\n",
47839a906b70Schristos	     __FUNCTION__, width, height, scrn->depth, scrn->bitsPerPixel));
47849a906b70Schristos
47859a906b70Schristos	VG_CLEAR(arg);
47869a906b70Schristos	arg.width = width;
47879a906b70Schristos	arg.height = height;
47889a906b70Schristos	arg.pitch = bo->pitch;
47899a906b70Schristos	arg.bpp = scrn->bitsPerPixel;
47909a906b70Schristos	arg.depth = scrn->depth;
47919a906b70Schristos	arg.handle = bo->handle;
47929a906b70Schristos
47939a906b70Schristos	/* First move the scanout out of cached memory */
47949a906b70Schristos	if (kgem->has_llc) {
47959a906b70Schristos		if (!gem_set_caching(kgem->fd, bo->handle, DISPLAY) &&
47969a906b70Schristos		    !gem_set_caching(kgem->fd, bo->handle, UNCACHED))
47979a906b70Schristos			return;
47989a906b70Schristos	}
47999a906b70Schristos
48009a906b70Schristos	bo->scanout = true;
48019a906b70Schristos
48029a906b70Schristos	/* Then pre-emptively move the object into the mappable
48039a906b70Schristos	 * portion to avoid rebinding later when busy.
48049a906b70Schristos	 */
48059a906b70Schristos	if (bo->map__gtt == NULL)
48069a906b70Schristos		bo->map__gtt = __kgem_bo_map__gtt(kgem, bo);
48079a906b70Schristos	if (bo->map__gtt) {
4808813957e3Ssnj		if (sigtrap_get() == 0) {
4809813957e3Ssnj			*(uint32_t *)bo->map__gtt = 0;
4810813957e3Ssnj			sigtrap_put();
4811813957e3Ssnj		}
48129a906b70Schristos		bo->domain = DOMAIN_GTT;
48139a906b70Schristos	}
48149a906b70Schristos
48159a906b70Schristos	if (do_ioctl(kgem->fd, DRM_IOCTL_MODE_ADDFB, &arg) == 0) {
48169a906b70Schristos		DBG(("%s: attached fb=%d to handle=%d\n",
48179a906b70Schristos		     __FUNCTION__, arg.fb_id, arg.handle));
48189a906b70Schristos		bo->delta = arg.fb_id;
48199a906b70Schristos	}
48209a906b70Schristos}
48219a906b70Schristos
482203b705cfSriastradhstruct kgem_bo *kgem_create_2d(struct kgem *kgem,
482303b705cfSriastradh			       int width,
482403b705cfSriastradh			       int height,
482503b705cfSriastradh			       int bpp,
482603b705cfSriastradh			       int tiling,
482703b705cfSriastradh			       uint32_t flags)
482803b705cfSriastradh{
482903b705cfSriastradh	struct list *cache;
483003b705cfSriastradh	struct kgem_bo *bo;
483103b705cfSriastradh	uint32_t pitch, tiled_height, size;
483203b705cfSriastradh	uint32_t handle;
483303b705cfSriastradh	int i, bucket, retry;
483403b705cfSriastradh	bool exact = flags & (CREATE_EXACT | CREATE_SCANOUT);
483503b705cfSriastradh
483603b705cfSriastradh	if (tiling < 0)
483703b705cfSriastradh		exact = true, tiling = -tiling;
483803b705cfSriastradh
483903b705cfSriastradh	DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d, cpu-mapping=%d, gtt-mapping=%d, scanout?=%d, prime?=%d, temp?=%d)\n", __FUNCTION__,
484003b705cfSriastradh	     width, height, bpp, tiling, exact,
484103b705cfSriastradh	     !!(flags & CREATE_INACTIVE),
484203b705cfSriastradh	     !!(flags & CREATE_CPU_MAP),
484303b705cfSriastradh	     !!(flags & CREATE_GTT_MAP),
484403b705cfSriastradh	     !!(flags & CREATE_SCANOUT),
484503b705cfSriastradh	     !!(flags & CREATE_PRIME),
484603b705cfSriastradh	     !!(flags & CREATE_TEMPORARY)));
484703b705cfSriastradh
484803b705cfSriastradh	size = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags,
484903b705cfSriastradh				 width, height, bpp, tiling, &pitch);
48509a906b70Schristos	if (size == 0) {
48519a906b70Schristos		DBG(("%s: invalid surface size (too large?)\n", __FUNCTION__));
48529a906b70Schristos		return NULL;
48539a906b70Schristos	}
48549a906b70Schristos
485503b705cfSriastradh	size /= PAGE_SIZE;
485603b705cfSriastradh	bucket = cache_bucket(size);
485703b705cfSriastradh
485803b705cfSriastradh	if (flags & CREATE_SCANOUT) {
485903b705cfSriastradh		struct kgem_bo *last = NULL;
486003b705cfSriastradh
486103b705cfSriastradh		list_for_each_entry_reverse(bo, &kgem->scanout, list) {
486203b705cfSriastradh			assert(bo->scanout);
486303b705cfSriastradh			assert(!bo->flush);
48649a906b70Schristos			assert(!bo->refcnt);
486503b705cfSriastradh			assert_tiling(kgem, bo);
486603b705cfSriastradh
486703b705cfSriastradh			if (size > num_pages(bo) || num_pages(bo) > 2*size)
486803b705cfSriastradh				continue;
486903b705cfSriastradh
48709a906b70Schristos			if (bo->tiling != tiling || bo->pitch != pitch)
48719a906b70Schristos				/* No tiling/pitch without recreating fb */
487203b705cfSriastradh				continue;
487303b705cfSriastradh
48749a906b70Schristos			if (bo->delta && !check_scanout_size(kgem, bo, width, height))
48759a906b70Schristos				continue;
487603b705cfSriastradh
487703b705cfSriastradh			if (flags & CREATE_INACTIVE && bo->rq) {
487803b705cfSriastradh				last = bo;
487903b705cfSriastradh				continue;
488003b705cfSriastradh			}
488103b705cfSriastradh
488203b705cfSriastradh			list_del(&bo->list);
488303b705cfSriastradh
488403b705cfSriastradh			bo->unique_id = kgem_get_unique_id(kgem);
488503b705cfSriastradh			DBG(("  1:from scanout: pitch=%d, tiling=%d, handle=%d, id=%d\n",
488603b705cfSriastradh			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
488703b705cfSriastradh			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
488803b705cfSriastradh			assert_tiling(kgem, bo);
488903b705cfSriastradh			bo->refcnt = 1;
489003b705cfSriastradh			return bo;
489103b705cfSriastradh		}
489203b705cfSriastradh
489303b705cfSriastradh		if (last) {
489403b705cfSriastradh			list_del(&last->list);
489503b705cfSriastradh
489603b705cfSriastradh			last->unique_id = kgem_get_unique_id(kgem);
489703b705cfSriastradh			DBG(("  1:from scanout: pitch=%d, tiling=%d, handle=%d, id=%d\n",
489803b705cfSriastradh			     last->pitch, last->tiling, last->handle, last->unique_id));
489903b705cfSriastradh			assert(last->pitch*kgem_aligned_height(kgem, height, last->tiling) <= kgem_bo_size(last));
490003b705cfSriastradh			assert_tiling(kgem, last);
490103b705cfSriastradh			last->refcnt = 1;
490203b705cfSriastradh			return last;
490303b705cfSriastradh		}
490403b705cfSriastradh
49059a906b70Schristos		if (container_of(kgem, struct sna, kgem)->scrn->vtSema) {
49069a906b70Schristos			ScrnInfoPtr scrn = container_of(kgem, struct sna, kgem)->scrn;
49079a906b70Schristos
49089a906b70Schristos			list_for_each_entry_reverse(bo, &kgem->scanout, list) {
49099a906b70Schristos				struct drm_mode_fb_cmd arg;
49109a906b70Schristos
49119a906b70Schristos				assert(bo->scanout);
49129a906b70Schristos				assert(!bo->refcnt);
49139a906b70Schristos
49149a906b70Schristos				if (size > num_pages(bo) || num_pages(bo) > 2*size)
49159a906b70Schristos					continue;
49169a906b70Schristos
49179a906b70Schristos				if (flags & CREATE_INACTIVE && bo->rq)
49189a906b70Schristos					continue;
49199a906b70Schristos
49209a906b70Schristos				list_del(&bo->list);
49219a906b70Schristos
49229a906b70Schristos				if (bo->tiling != tiling || bo->pitch != pitch) {
49239a906b70Schristos					if (bo->delta) {
49249a906b70Schristos						kgem_bo_rmfb(kgem, bo);
49259a906b70Schristos						bo->delta = 0;
49269a906b70Schristos					}
49279a906b70Schristos
49289a906b70Schristos					if (gem_set_tiling(kgem->fd, bo->handle,
49299a906b70Schristos							   tiling, pitch)) {
49309a906b70Schristos						bo->tiling = tiling;
49319a906b70Schristos						bo->pitch = pitch;
49329a906b70Schristos					} else {
49339a906b70Schristos						kgem_bo_free(kgem, bo);
49349a906b70Schristos						break;
49359a906b70Schristos					}
49369a906b70Schristos				}
49379a906b70Schristos
49389a906b70Schristos				VG_CLEAR(arg);
49399a906b70Schristos				arg.width = width;
49409a906b70Schristos				arg.height = height;
49419a906b70Schristos				arg.pitch = bo->pitch;
49429a906b70Schristos				arg.bpp = scrn->bitsPerPixel;
49439a906b70Schristos				arg.depth = scrn->depth;
49449a906b70Schristos				arg.handle = bo->handle;
49459a906b70Schristos
49469a906b70Schristos				if (do_ioctl(kgem->fd, DRM_IOCTL_MODE_ADDFB, &arg)) {
49479a906b70Schristos					kgem_bo_free(kgem, bo);
49489a906b70Schristos					break;
49499a906b70Schristos				}
49509a906b70Schristos
49519a906b70Schristos				bo->delta = arg.fb_id;
49529a906b70Schristos				bo->unique_id = kgem_get_unique_id(kgem);
49539a906b70Schristos
49549a906b70Schristos				DBG(("  2:from scanout: pitch=%d, tiling=%d, handle=%d, id=%d\n",
49559a906b70Schristos				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
49569a906b70Schristos				assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
49579a906b70Schristos				assert_tiling(kgem, bo);
49589a906b70Schristos				bo->refcnt = 1;
49599a906b70Schristos				return bo;
49609a906b70Schristos			}
49619a906b70Schristos		}
49629a906b70Schristos
496303b705cfSriastradh		bo = __kgem_bo_create_as_display(kgem, size, tiling, pitch);
496403b705cfSriastradh		if (bo)
496503b705cfSriastradh			return bo;
49669a906b70Schristos
49679a906b70Schristos		flags |= CREATE_INACTIVE;
496803b705cfSriastradh	}
496903b705cfSriastradh
497003b705cfSriastradh	if (bucket >= NUM_CACHE_BUCKETS) {
497103b705cfSriastradh		DBG(("%s: large bo num pages=%d, bucket=%d\n",
497203b705cfSriastradh		     __FUNCTION__, size, bucket));
497303b705cfSriastradh
497403b705cfSriastradh		if (flags & CREATE_INACTIVE)
497503b705cfSriastradh			goto large_inactive;
497603b705cfSriastradh
497703b705cfSriastradh		tiled_height = kgem_aligned_height(kgem, height, tiling);
497803b705cfSriastradh
497903b705cfSriastradh		list_for_each_entry(bo, &kgem->large, list) {
498003b705cfSriastradh			assert(!bo->purged);
498103b705cfSriastradh			assert(!bo->scanout);
498203b705cfSriastradh			assert(bo->refcnt == 0);
498303b705cfSriastradh			assert(bo->reusable);
498403b705cfSriastradh			assert_tiling(kgem, bo);
498503b705cfSriastradh
498603b705cfSriastradh			if (kgem->gen < 040) {
498703b705cfSriastradh				if (bo->pitch < pitch) {
498803b705cfSriastradh					DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
498903b705cfSriastradh					     bo->tiling, tiling,
499003b705cfSriastradh					     bo->pitch, pitch));
499103b705cfSriastradh					continue;
499203b705cfSriastradh				}
499303b705cfSriastradh
499403b705cfSriastradh				if (bo->pitch * tiled_height > bytes(bo))
499503b705cfSriastradh					continue;
499603b705cfSriastradh			} else {
499703b705cfSriastradh				if (num_pages(bo) < size)
499803b705cfSriastradh					continue;
499903b705cfSriastradh
500003b705cfSriastradh				if (bo->pitch != pitch || bo->tiling != tiling) {
500103b705cfSriastradh					if (!gem_set_tiling(kgem->fd, bo->handle,
500203b705cfSriastradh							    tiling, pitch))
500303b705cfSriastradh						continue;
500403b705cfSriastradh
500503b705cfSriastradh					bo->pitch = pitch;
500603b705cfSriastradh					bo->tiling = tiling;
500703b705cfSriastradh				}
500803b705cfSriastradh			}
500903b705cfSriastradh
501003b705cfSriastradh			kgem_bo_remove_from_active(kgem, bo);
501103b705cfSriastradh
501203b705cfSriastradh			bo->unique_id = kgem_get_unique_id(kgem);
501303b705cfSriastradh			bo->delta = 0;
501403b705cfSriastradh			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
501503b705cfSriastradh			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
501603b705cfSriastradh			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
501703b705cfSriastradh			assert_tiling(kgem, bo);
501803b705cfSriastradh			bo->refcnt = 1;
501903b705cfSriastradh			return bo;
502003b705cfSriastradh		}
502103b705cfSriastradh
502203b705cfSriastradhlarge_inactive:
502303b705cfSriastradh		__kgem_throttle_retire(kgem, flags);
502403b705cfSriastradh		list_for_each_entry(bo, &kgem->large_inactive, list) {
502503b705cfSriastradh			assert(bo->refcnt == 0);
502603b705cfSriastradh			assert(bo->reusable);
502703b705cfSriastradh			assert(!bo->scanout);
502803b705cfSriastradh			assert_tiling(kgem, bo);
502903b705cfSriastradh
503003b705cfSriastradh			if (size > num_pages(bo))
503103b705cfSriastradh				continue;
503203b705cfSriastradh
503303b705cfSriastradh			if (bo->tiling != tiling ||
503403b705cfSriastradh			    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
503503b705cfSriastradh				if (!gem_set_tiling(kgem->fd, bo->handle,
503603b705cfSriastradh						    tiling, pitch))
503703b705cfSriastradh					continue;
503803b705cfSriastradh
503903b705cfSriastradh				bo->tiling = tiling;
504003b705cfSriastradh				bo->pitch = pitch;
504103b705cfSriastradh			}
504203b705cfSriastradh
504303b705cfSriastradh			if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
504403b705cfSriastradh				kgem_bo_free(kgem, bo);
504503b705cfSriastradh				break;
504603b705cfSriastradh			}
504703b705cfSriastradh
504803b705cfSriastradh			list_del(&bo->list);
504903b705cfSriastradh
505003b705cfSriastradh			assert(bo->domain != DOMAIN_GPU);
505103b705cfSriastradh			bo->unique_id = kgem_get_unique_id(kgem);
505203b705cfSriastradh			bo->pitch = pitch;
505303b705cfSriastradh			bo->delta = 0;
505403b705cfSriastradh			DBG(("  1:from large inactive: pitch=%d, tiling=%d, handle=%d, id=%d\n",
505503b705cfSriastradh			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
505603b705cfSriastradh			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
505703b705cfSriastradh			assert_tiling(kgem, bo);
505803b705cfSriastradh			bo->refcnt = 1;
50599a906b70Schristos
50609a906b70Schristos			if (flags & CREATE_SCANOUT)
50619a906b70Schristos				__kgem_bo_make_scanout(kgem, bo, width, height);
50629a906b70Schristos
506303b705cfSriastradh			return bo;
506403b705cfSriastradh		}
506503b705cfSriastradh
506603b705cfSriastradh		goto create;
506703b705cfSriastradh	}
506803b705cfSriastradh
506903b705cfSriastradh	if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
507003b705cfSriastradh		int for_cpu = !!(flags & CREATE_CPU_MAP);
507103b705cfSriastradh		if (kgem->has_llc && tiling == I915_TILING_NONE)
507203b705cfSriastradh			for_cpu = 1;
507303b705cfSriastradh		/* We presume that we will need to upload to this bo,
507403b705cfSriastradh		 * and so would prefer to have an active VMA.
507503b705cfSriastradh		 */
507603b705cfSriastradh		cache = &kgem->vma[for_cpu].inactive[bucket];
507703b705cfSriastradh		do {
507803b705cfSriastradh			list_for_each_entry(bo, cache, vma) {
507903b705cfSriastradh				assert(bucket(bo) == bucket);
508003b705cfSriastradh				assert(bo->refcnt == 0);
508103b705cfSriastradh				assert(!bo->scanout);
5082813957e3Ssnj				assert(for_cpu ? !!bo->map__cpu : (bo->map__gtt || bo->map__wc));
508303b705cfSriastradh				assert(bo->rq == NULL);
50849a906b70Schristos				assert(bo->exec == NULL);
508503b705cfSriastradh				assert(list_is_empty(&bo->request));
508603b705cfSriastradh				assert(bo->flush == false);
508703b705cfSriastradh				assert_tiling(kgem, bo);
508803b705cfSriastradh
508903b705cfSriastradh				if (size > num_pages(bo)) {
509003b705cfSriastradh					DBG(("inactive too small: %d < %d\n",
509103b705cfSriastradh					     num_pages(bo), size));
509203b705cfSriastradh					continue;
509303b705cfSriastradh				}
509403b705cfSriastradh
50959a906b70Schristos				if (flags & UNCACHED && !kgem->has_llc && bo->domain != DOMAIN_CPU)
50969a906b70Schristos					continue;
50979a906b70Schristos
509803b705cfSriastradh				if (bo->tiling != tiling ||
509903b705cfSriastradh				    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
51009a906b70Schristos					if (bo->map__gtt ||
51019a906b70Schristos					    !gem_set_tiling(kgem->fd, bo->handle,
51029a906b70Schristos							    tiling, pitch)) {
51039a906b70Schristos						DBG(("inactive GTT vma with wrong tiling: %d < %d\n",
51049a906b70Schristos						     bo->tiling, tiling));
51059a906b70Schristos						continue;
51069a906b70Schristos					}
51079a906b70Schristos					bo->tiling = tiling;
51089a906b70Schristos					bo->pitch = pitch;
510903b705cfSriastradh				}
511003b705cfSriastradh
511103b705cfSriastradh				if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
511203b705cfSriastradh					kgem_bo_free(kgem, bo);
511303b705cfSriastradh					break;
511403b705cfSriastradh				}
511503b705cfSriastradh
511603b705cfSriastradh				assert(bo->tiling == tiling);
511703b705cfSriastradh				bo->pitch = pitch;
511803b705cfSriastradh				bo->delta = 0;
511903b705cfSriastradh				bo->unique_id = kgem_get_unique_id(kgem);
512003b705cfSriastradh
512103b705cfSriastradh				kgem_bo_remove_from_inactive(kgem, bo);
51229a906b70Schristos				assert(list_is_empty(&bo->list));
51239a906b70Schristos				assert(list_is_empty(&bo->vma));
512403b705cfSriastradh
512503b705cfSriastradh				DBG(("  from inactive vma: pitch=%d, tiling=%d: handle=%d, id=%d\n",
512603b705cfSriastradh				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
512703b705cfSriastradh				assert(bo->reusable);
512803b705cfSriastradh				assert(bo->domain != DOMAIN_GPU);
512903b705cfSriastradh				ASSERT_IDLE(kgem, bo->handle);
513003b705cfSriastradh				assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
513103b705cfSriastradh				assert_tiling(kgem, bo);
513203b705cfSriastradh				bo->refcnt = 1;
513303b705cfSriastradh				return bo;
513403b705cfSriastradh			}
513503b705cfSriastradh		} while (!list_is_empty(cache) &&
513603b705cfSriastradh			 __kgem_throttle_retire(kgem, flags));
513703b705cfSriastradh
513803b705cfSriastradh		if (flags & CREATE_CPU_MAP && !kgem->has_llc) {
513903b705cfSriastradh			if (list_is_empty(&kgem->active[bucket][tiling]) &&
514003b705cfSriastradh			    list_is_empty(&kgem->inactive[bucket]))
514103b705cfSriastradh				flags &= ~CREATE_CACHED;
514203b705cfSriastradh
514303b705cfSriastradh			goto create;
514403b705cfSriastradh		}
514503b705cfSriastradh	}
514603b705cfSriastradh
514703b705cfSriastradh	if (flags & CREATE_INACTIVE)
514803b705cfSriastradh		goto skip_active_search;
514903b705cfSriastradh
515003b705cfSriastradh	/* Best active match */
515103b705cfSriastradh	retry = NUM_CACHE_BUCKETS - bucket;
515203b705cfSriastradh	if (retry > 3 && (flags & CREATE_TEMPORARY) == 0)
515303b705cfSriastradh		retry = 3;
51549a906b70Schristossearch_active:
515503b705cfSriastradh	assert(bucket < NUM_CACHE_BUCKETS);
515603b705cfSriastradh	cache = &kgem->active[bucket][tiling];
515703b705cfSriastradh	if (tiling) {
515803b705cfSriastradh		tiled_height = kgem_aligned_height(kgem, height, tiling);
515903b705cfSriastradh		list_for_each_entry(bo, cache, list) {
516003b705cfSriastradh			assert(!bo->purged);
516103b705cfSriastradh			assert(bo->refcnt == 0);
516203b705cfSriastradh			assert(bucket(bo) == bucket);
516303b705cfSriastradh			assert(bo->reusable);
516403b705cfSriastradh			assert(bo->tiling == tiling);
516503b705cfSriastradh			assert(bo->flush == false);
516603b705cfSriastradh			assert(!bo->scanout);
516703b705cfSriastradh			assert_tiling(kgem, bo);
516803b705cfSriastradh
516903b705cfSriastradh			if (kgem->gen < 040) {
517003b705cfSriastradh				if (bo->pitch < pitch) {
517103b705cfSriastradh					DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
517203b705cfSriastradh					     bo->tiling, tiling,
517303b705cfSriastradh					     bo->pitch, pitch));
517403b705cfSriastradh					continue;
517503b705cfSriastradh				}
517603b705cfSriastradh
517703b705cfSriastradh				if (bo->pitch * tiled_height > bytes(bo))
517803b705cfSriastradh					continue;
517903b705cfSriastradh			} else {
518003b705cfSriastradh				if (num_pages(bo) < size)
518103b705cfSriastradh					continue;
518203b705cfSriastradh
518303b705cfSriastradh				if (bo->pitch != pitch) {
518403b705cfSriastradh					if (!gem_set_tiling(kgem->fd,
518503b705cfSriastradh							    bo->handle,
518603b705cfSriastradh							    tiling, pitch))
518703b705cfSriastradh						continue;
518803b705cfSriastradh
518903b705cfSriastradh					bo->pitch = pitch;
519003b705cfSriastradh				}
519103b705cfSriastradh			}
519203b705cfSriastradh
519303b705cfSriastradh			kgem_bo_remove_from_active(kgem, bo);
519403b705cfSriastradh
519503b705cfSriastradh			bo->unique_id = kgem_get_unique_id(kgem);
519603b705cfSriastradh			bo->delta = 0;
519703b705cfSriastradh			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
519803b705cfSriastradh			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
519903b705cfSriastradh			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
520003b705cfSriastradh			assert_tiling(kgem, bo);
520103b705cfSriastradh			bo->refcnt = 1;
520203b705cfSriastradh			return bo;
520303b705cfSriastradh		}
520403b705cfSriastradh	} else {
520503b705cfSriastradh		list_for_each_entry(bo, cache, list) {
520603b705cfSriastradh			assert(bucket(bo) == bucket);
520703b705cfSriastradh			assert(!bo->purged);
520803b705cfSriastradh			assert(bo->refcnt == 0);
520903b705cfSriastradh			assert(bo->reusable);
521003b705cfSriastradh			assert(!bo->scanout);
521103b705cfSriastradh			assert(bo->tiling == tiling);
521203b705cfSriastradh			assert(bo->flush == false);
521303b705cfSriastradh			assert_tiling(kgem, bo);
521403b705cfSriastradh
521503b705cfSriastradh			if (num_pages(bo) < size)
521603b705cfSriastradh				continue;
521703b705cfSriastradh
521803b705cfSriastradh			kgem_bo_remove_from_active(kgem, bo);
521903b705cfSriastradh
522003b705cfSriastradh			bo->pitch = pitch;
522103b705cfSriastradh			bo->unique_id = kgem_get_unique_id(kgem);
522203b705cfSriastradh			bo->delta = 0;
522303b705cfSriastradh			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
522403b705cfSriastradh			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
522503b705cfSriastradh			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
522603b705cfSriastradh			assert_tiling(kgem, bo);
522703b705cfSriastradh			bo->refcnt = 1;
522803b705cfSriastradh			return bo;
522903b705cfSriastradh		}
523003b705cfSriastradh	}
523103b705cfSriastradh
52329a906b70Schristos	if (kgem->gen >= 040) {
52339a906b70Schristos		for (i = I915_TILING_Y; i >= I915_TILING_NONE; i--) {
52349a906b70Schristos			cache = &kgem->active[bucket][i];
52359a906b70Schristos			list_for_each_entry(bo, cache, list) {
52369a906b70Schristos				assert(!bo->purged);
52379a906b70Schristos				assert(bo->refcnt == 0);
52389a906b70Schristos				assert(bo->reusable);
52399a906b70Schristos				assert(!bo->scanout);
52409a906b70Schristos				assert(bo->flush == false);
52419a906b70Schristos				assert_tiling(kgem, bo);
524203b705cfSriastradh
52439a906b70Schristos				if (num_pages(bo) < size)
52449a906b70Schristos					continue;
524503b705cfSriastradh
52469a906b70Schristos				if (bo->tiling != tiling ||
52479a906b70Schristos				    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
524803b705cfSriastradh					if (!gem_set_tiling(kgem->fd,
524903b705cfSriastradh							    bo->handle,
525003b705cfSriastradh							    tiling, pitch))
525103b705cfSriastradh						continue;
52529a906b70Schristos				}
525303b705cfSriastradh
52549a906b70Schristos				kgem_bo_remove_from_active(kgem, bo);
525503b705cfSriastradh
52569a906b70Schristos				bo->unique_id = kgem_get_unique_id(kgem);
52579a906b70Schristos				bo->pitch = pitch;
52589a906b70Schristos				bo->tiling = tiling;
52599a906b70Schristos				bo->delta = 0;
52609a906b70Schristos				DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
52619a906b70Schristos				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
52629a906b70Schristos				assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
52639a906b70Schristos				assert_tiling(kgem, bo);
52649a906b70Schristos				bo->refcnt = 1;
52659a906b70Schristos				return bo;
526603b705cfSriastradh			}
526703b705cfSriastradh		}
52689a906b70Schristos	} else if (!exact) { /* allow an active near-miss? */
52699a906b70Schristos		for (i = tiling; i >= I915_TILING_NONE; i--) {
527003b705cfSriastradh			tiled_height = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags,
527103b705cfSriastradh							 width, height, bpp, tiling, &pitch);
527203b705cfSriastradh			cache = active(kgem, tiled_height / PAGE_SIZE, i);
527303b705cfSriastradh			tiled_height = kgem_aligned_height(kgem, height, i);
527403b705cfSriastradh			list_for_each_entry(bo, cache, list) {
527503b705cfSriastradh				assert(!bo->purged);
527603b705cfSriastradh				assert(bo->refcnt == 0);
527703b705cfSriastradh				assert(bo->reusable);
527803b705cfSriastradh				assert(!bo->scanout);
527903b705cfSriastradh				assert(bo->flush == false);
528003b705cfSriastradh				assert_tiling(kgem, bo);
528103b705cfSriastradh
528203b705cfSriastradh				if (bo->tiling) {
528303b705cfSriastradh					if (bo->pitch < pitch) {
528403b705cfSriastradh						DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
528503b705cfSriastradh						     bo->tiling, tiling,
528603b705cfSriastradh						     bo->pitch, pitch));
528703b705cfSriastradh						continue;
528803b705cfSriastradh					}
528903b705cfSriastradh				} else
529003b705cfSriastradh					bo->pitch = pitch;
529103b705cfSriastradh
529203b705cfSriastradh				if (bo->pitch * tiled_height > bytes(bo))
529303b705cfSriastradh					continue;
529403b705cfSriastradh
529503b705cfSriastradh				kgem_bo_remove_from_active(kgem, bo);
529603b705cfSriastradh
529703b705cfSriastradh				bo->unique_id = kgem_get_unique_id(kgem);
529803b705cfSriastradh				bo->delta = 0;
529903b705cfSriastradh				DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
530003b705cfSriastradh				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
530103b705cfSriastradh				assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
530203b705cfSriastradh				assert_tiling(kgem, bo);
530303b705cfSriastradh				bo->refcnt = 1;
530403b705cfSriastradh				return bo;
530503b705cfSriastradh			}
530603b705cfSriastradh		}
530703b705cfSriastradh	}
530803b705cfSriastradh
53099a906b70Schristos	if (--retry) {
53109a906b70Schristos		bucket++;
53119a906b70Schristos		goto search_active;
53129a906b70Schristos	}
53139a906b70Schristos
531403b705cfSriastradhskip_active_search:
531503b705cfSriastradh	bucket = cache_bucket(size);
531603b705cfSriastradh	retry = NUM_CACHE_BUCKETS - bucket;
531703b705cfSriastradh	if (retry > 3)
531803b705cfSriastradh		retry = 3;
531903b705cfSriastradhsearch_inactive:
532003b705cfSriastradh	/* Now just look for a close match and prefer any currently active */
532103b705cfSriastradh	assert(bucket < NUM_CACHE_BUCKETS);
532203b705cfSriastradh	cache = &kgem->inactive[bucket];
532303b705cfSriastradh	list_for_each_entry(bo, cache, list) {
532403b705cfSriastradh		assert(bucket(bo) == bucket);
532503b705cfSriastradh		assert(bo->reusable);
532603b705cfSriastradh		assert(!bo->scanout);
532703b705cfSriastradh		assert(bo->flush == false);
532803b705cfSriastradh		assert_tiling(kgem, bo);
532903b705cfSriastradh
533003b705cfSriastradh		if (size > num_pages(bo)) {
533103b705cfSriastradh			DBG(("inactive too small: %d < %d\n",
533203b705cfSriastradh			     num_pages(bo), size));
533303b705cfSriastradh			continue;
533403b705cfSriastradh		}
533503b705cfSriastradh
533603b705cfSriastradh		if (bo->tiling != tiling ||
533703b705cfSriastradh		    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
533803b705cfSriastradh			if (!gem_set_tiling(kgem->fd, bo->handle,
533903b705cfSriastradh					    tiling, pitch))
534003b705cfSriastradh				continue;
534103b705cfSriastradh		}
534203b705cfSriastradh
534303b705cfSriastradh		if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
534403b705cfSriastradh			kgem_bo_free(kgem, bo);
534503b705cfSriastradh			break;
534603b705cfSriastradh		}
534703b705cfSriastradh
534803b705cfSriastradh		kgem_bo_remove_from_inactive(kgem, bo);
53499a906b70Schristos		assert(list_is_empty(&bo->list));
53509a906b70Schristos		assert(list_is_empty(&bo->vma));
535103b705cfSriastradh
535203b705cfSriastradh		bo->pitch = pitch;
535303b705cfSriastradh		bo->tiling = tiling;
535403b705cfSriastradh
535503b705cfSriastradh		bo->delta = 0;
535603b705cfSriastradh		bo->unique_id = kgem_get_unique_id(kgem);
535703b705cfSriastradh		assert(bo->pitch);
535803b705cfSriastradh		DBG(("  from inactive: pitch=%d, tiling=%d: handle=%d, id=%d\n",
535903b705cfSriastradh		     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
536003b705cfSriastradh		assert(bo->refcnt == 0);
536103b705cfSriastradh		assert(bo->reusable);
536203b705cfSriastradh		assert((flags & CREATE_INACTIVE) == 0 || bo->domain != DOMAIN_GPU);
536303b705cfSriastradh		ASSERT_MAYBE_IDLE(kgem, bo->handle, flags & CREATE_INACTIVE);
536403b705cfSriastradh		assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
536503b705cfSriastradh		assert_tiling(kgem, bo);
536603b705cfSriastradh		bo->refcnt = 1;
53679a906b70Schristos
53689a906b70Schristos		if (flags & CREATE_SCANOUT)
53699a906b70Schristos			__kgem_bo_make_scanout(kgem, bo, width, height);
53709a906b70Schristos
537103b705cfSriastradh		return bo;
537203b705cfSriastradh	}
537303b705cfSriastradh
53749a906b70Schristos	if ((flags & CREATE_NO_RETIRE) == 0) {
53759a906b70Schristos		list_for_each_entry_reverse(bo, &kgem->active[bucket][tiling], list) {
53769a906b70Schristos			if (bo->exec)
53779a906b70Schristos				break;
53789a906b70Schristos
53799a906b70Schristos			if (size > num_pages(bo))
53809a906b70Schristos				continue;
53819a906b70Schristos
53829a906b70Schristos			if (__kgem_busy(kgem, bo->handle)) {
53839a906b70Schristos				if (flags & CREATE_NO_THROTTLE)
53849a906b70Schristos					goto no_retire;
53859a906b70Schristos
53869a906b70Schristos				do {
53879a906b70Schristos					if (!kgem->need_throttle) {
53889a906b70Schristos						DBG(("%s: not throttling for active handle=%d\n", __FUNCTION__, bo->handle));
53899a906b70Schristos						goto no_retire;
53909a906b70Schristos					}
53919a906b70Schristos
53929a906b70Schristos					__kgem_throttle(kgem, false);
53939a906b70Schristos				} while (__kgem_busy(kgem, bo->handle));
53949a906b70Schristos			}
53959a906b70Schristos
53969a906b70Schristos			DBG(("%s: flushed active handle=%d\n", __FUNCTION__, bo->handle));
53979a906b70Schristos
53989a906b70Schristos			kgem_bo_remove_from_active(kgem, bo);
53999a906b70Schristos			__kgem_bo_clear_busy(bo);
54009a906b70Schristos
54019a906b70Schristos			if (tiling != I915_TILING_NONE && bo->pitch != pitch) {
54029a906b70Schristos				if (!gem_set_tiling(kgem->fd, bo->handle, tiling, pitch)) {
54039a906b70Schristos					kgem_bo_free(kgem, bo);
54049a906b70Schristos					goto no_retire;
54059a906b70Schristos				}
54069a906b70Schristos			}
54079a906b70Schristos
54089a906b70Schristos			bo->pitch = pitch;
54099a906b70Schristos			bo->unique_id = kgem_get_unique_id(kgem);
54109a906b70Schristos			bo->delta = 0;
54119a906b70Schristos			DBG(("  2:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
54129a906b70Schristos			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
54139a906b70Schristos			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
54149a906b70Schristos			assert_tiling(kgem, bo);
54159a906b70Schristos			bo->refcnt = 1;
54169a906b70Schristos
54179a906b70Schristos			if (flags & CREATE_SCANOUT)
54189a906b70Schristos				__kgem_bo_make_scanout(kgem, bo, width, height);
54199a906b70Schristos
54209a906b70Schristos			return bo;
54219a906b70Schristos		}
54229a906b70Schristosno_retire:
54239a906b70Schristos		flags |= CREATE_NO_RETIRE;
542403b705cfSriastradh	}
542503b705cfSriastradh
542603b705cfSriastradh	if (--retry) {
542703b705cfSriastradh		bucket++;
542803b705cfSriastradh		goto search_inactive;
542903b705cfSriastradh	}
543003b705cfSriastradh
543103b705cfSriastradhcreate:
54329a906b70Schristos	if (flags & CREATE_CACHED) {
54339a906b70Schristos		DBG(("%s: no cached bo found, requested not to create a new bo\n", __FUNCTION__));
543403b705cfSriastradh		return NULL;
54359a906b70Schristos	}
543603b705cfSriastradh
543703b705cfSriastradh	if (bucket >= NUM_CACHE_BUCKETS)
543803b705cfSriastradh		size = ALIGN(size, 1024);
543903b705cfSriastradh	handle = gem_create(kgem->fd, size);
54409a906b70Schristos	if (handle == 0) {
54419a906b70Schristos		DBG(("%s: kernel allocation (gem_create) failure\n", __FUNCTION__));
544203b705cfSriastradh		return NULL;
54439a906b70Schristos	}
544403b705cfSriastradh
544503b705cfSriastradh	bo = __kgem_bo_alloc(handle, size);
544603b705cfSriastradh	if (!bo) {
54479a906b70Schristos		DBG(("%s: malloc failed\n", __FUNCTION__));
544803b705cfSriastradh		gem_close(kgem->fd, handle);
544903b705cfSriastradh		return NULL;
545003b705cfSriastradh	}
545103b705cfSriastradh
545203b705cfSriastradh	bo->unique_id = kgem_get_unique_id(kgem);
545303b705cfSriastradh	if (tiling == I915_TILING_NONE ||
545403b705cfSriastradh	    gem_set_tiling(kgem->fd, handle, tiling, pitch)) {
545503b705cfSriastradh		bo->tiling = tiling;
545603b705cfSriastradh		bo->pitch = pitch;
54579a906b70Schristos		if (flags & CREATE_SCANOUT)
54589a906b70Schristos			__kgem_bo_make_scanout(kgem, bo, width, height);
545903b705cfSriastradh	} else {
546003b705cfSriastradh		if (flags & CREATE_EXACT) {
54619a906b70Schristos			DBG(("%s: failed to set exact tiling (gem_set_tiling)\n", __FUNCTION__));
54629a906b70Schristos			gem_close(kgem->fd, handle);
54639a906b70Schristos			free(bo);
54649a906b70Schristos			return NULL;
546503b705cfSriastradh		}
546603b705cfSriastradh	}
546703b705cfSriastradh
546803b705cfSriastradh	assert(bytes(bo) >= bo->pitch * kgem_aligned_height(kgem, height, bo->tiling));
546903b705cfSriastradh	assert_tiling(kgem, bo);
547003b705cfSriastradh
547103b705cfSriastradh	debug_alloc__bo(kgem, bo);
547203b705cfSriastradh
547303b705cfSriastradh	DBG(("  new pitch=%d, tiling=%d, handle=%d, id=%d, num_pages=%d [%d], bucket=%d\n",
547403b705cfSriastradh	     bo->pitch, bo->tiling, bo->handle, bo->unique_id,
547503b705cfSriastradh	     size, num_pages(bo), bucket(bo)));
547603b705cfSriastradh	return bo;
547703b705cfSriastradh}
547803b705cfSriastradh
547903b705cfSriastradhstruct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
548003b705cfSriastradh				   int width,
548103b705cfSriastradh				   int height,
548203b705cfSriastradh				   int bpp,
548303b705cfSriastradh				   uint32_t flags)
548403b705cfSriastradh{
548503b705cfSriastradh	struct kgem_bo *bo;
548603b705cfSriastradh	int stride, size;
548703b705cfSriastradh
548803b705cfSriastradh	if (DBG_NO_CPU)
548903b705cfSriastradh		return NULL;
549003b705cfSriastradh
549103b705cfSriastradh	DBG(("%s(%dx%d, bpp=%d)\n", __FUNCTION__, width, height, bpp));
549203b705cfSriastradh
549303b705cfSriastradh	if (kgem->has_llc) {
549403b705cfSriastradh		bo = kgem_create_2d(kgem, width, height, bpp,
549503b705cfSriastradh				    I915_TILING_NONE, flags);
549603b705cfSriastradh		if (bo == NULL)
549703b705cfSriastradh			return bo;
549803b705cfSriastradh
549903b705cfSriastradh		assert(bo->tiling == I915_TILING_NONE);
550003b705cfSriastradh		assert_tiling(kgem, bo);
550103b705cfSriastradh
550203b705cfSriastradh		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
550303b705cfSriastradh			kgem_bo_destroy(kgem, bo);
550403b705cfSriastradh			return NULL;
550503b705cfSriastradh		}
550603b705cfSriastradh
550703b705cfSriastradh		return bo;
550803b705cfSriastradh	}
550903b705cfSriastradh
551003b705cfSriastradh	assert(width > 0 && height > 0);
551103b705cfSriastradh	stride = ALIGN(width, 2) * bpp >> 3;
551203b705cfSriastradh	stride = ALIGN(stride, 4);
551303b705cfSriastradh	size = stride * ALIGN(height, 2);
551403b705cfSriastradh	assert(size >= PAGE_SIZE);
551503b705cfSriastradh
551603b705cfSriastradh	DBG(("%s: %dx%d, %d bpp, stride=%d\n",
551703b705cfSriastradh	     __FUNCTION__, width, height, bpp, stride));
551803b705cfSriastradh
551903b705cfSriastradh	bo = search_snoop_cache(kgem, NUM_PAGES(size), 0);
552003b705cfSriastradh	if (bo) {
552103b705cfSriastradh		assert(bo->tiling == I915_TILING_NONE);
552203b705cfSriastradh		assert_tiling(kgem, bo);
552303b705cfSriastradh		assert(bo->snoop);
552403b705cfSriastradh		bo->refcnt = 1;
552503b705cfSriastradh		bo->pitch = stride;
552603b705cfSriastradh		bo->unique_id = kgem_get_unique_id(kgem);
552703b705cfSriastradh		return bo;
552803b705cfSriastradh	}
552903b705cfSriastradh
553003b705cfSriastradh	if (kgem->has_caching) {
553103b705cfSriastradh		bo = kgem_create_linear(kgem, size, flags);
553203b705cfSriastradh		if (bo == NULL)
553303b705cfSriastradh			return NULL;
553403b705cfSriastradh
553503b705cfSriastradh		assert(bo->tiling == I915_TILING_NONE);
553603b705cfSriastradh		assert_tiling(kgem, bo);
553703b705cfSriastradh
5538813957e3Ssnj		assert(!__kgem_busy(kgem, bo->handle));
553903b705cfSriastradh		if (!gem_set_caching(kgem->fd, bo->handle, SNOOPED)) {
554003b705cfSriastradh			kgem_bo_destroy(kgem, bo);
554103b705cfSriastradh			return NULL;
554203b705cfSriastradh		}
554303b705cfSriastradh		bo->snoop = true;
554403b705cfSriastradh
554503b705cfSriastradh		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
554603b705cfSriastradh			kgem_bo_destroy(kgem, bo);
554703b705cfSriastradh			return NULL;
554803b705cfSriastradh		}
554903b705cfSriastradh
555003b705cfSriastradh		bo->pitch = stride;
555103b705cfSriastradh		bo->unique_id = kgem_get_unique_id(kgem);
555203b705cfSriastradh		return bo;
555303b705cfSriastradh	}
555403b705cfSriastradh
555503b705cfSriastradh	if (kgem->has_userptr) {
555603b705cfSriastradh		void *ptr;
555703b705cfSriastradh
555803b705cfSriastradh		/* XXX */
555903b705cfSriastradh		//if (posix_memalign(&ptr, 64, ALIGN(size, 64)))
556003b705cfSriastradh		if (posix_memalign(&ptr, PAGE_SIZE, ALIGN(size, PAGE_SIZE)))
556103b705cfSriastradh			return NULL;
556203b705cfSriastradh
556303b705cfSriastradh		bo = kgem_create_map(kgem, ptr, size, false);
556403b705cfSriastradh		if (bo == NULL) {
556503b705cfSriastradh			free(ptr);
556603b705cfSriastradh			return NULL;
556703b705cfSriastradh		}
556803b705cfSriastradh
556903b705cfSriastradh		bo->pitch = stride;
557003b705cfSriastradh		bo->unique_id = kgem_get_unique_id(kgem);
557103b705cfSriastradh		return bo;
557203b705cfSriastradh	}
557303b705cfSriastradh
557403b705cfSriastradh	return NULL;
557503b705cfSriastradh}
557603b705cfSriastradh
557703b705cfSriastradhvoid _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
557803b705cfSriastradh{
557903b705cfSriastradh	DBG(("%s: handle=%d, proxy? %d\n",
558003b705cfSriastradh	     __FUNCTION__, bo->handle, bo->proxy != NULL));
558103b705cfSriastradh
558203b705cfSriastradh	if (bo->proxy) {
55839a906b70Schristos		assert(!bo->reusable);
55849a906b70Schristos		kgem_bo_binding_free(kgem, bo);
55859a906b70Schristos
55869a906b70Schristos		assert(list_is_empty(&bo->list));
558703b705cfSriastradh		_list_del(&bo->vma);
558803b705cfSriastradh		_list_del(&bo->request);
55899a906b70Schristos
55909a906b70Schristos		if (bo->io && bo->domain == DOMAIN_CPU)
559103b705cfSriastradh			_kgem_bo_delete_buffer(kgem, bo);
55929a906b70Schristos
559303b705cfSriastradh		kgem_bo_unref(kgem, bo->proxy);
559403b705cfSriastradh
55959a906b70Schristos		if (DBG_NO_MALLOC_CACHE) {
55969a906b70Schristos			free(bo);
55979a906b70Schristos		} else {
55989a906b70Schristos			*(struct kgem_bo **)bo = __kgem_freed_bo;
55999a906b70Schristos			__kgem_freed_bo = bo;
56009a906b70Schristos		}
56019a906b70Schristos	} else
56029a906b70Schristos		__kgem_bo_destroy(kgem, bo);
560303b705cfSriastradh}
560403b705cfSriastradh
560503b705cfSriastradhstatic void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo)
560603b705cfSriastradh{
560703b705cfSriastradh	assert(bo->rq);
560803b705cfSriastradh	assert(bo->exec == NULL);
560903b705cfSriastradh	assert(bo->needs_flush);
561003b705cfSriastradh
561103b705cfSriastradh	/* The kernel will emit a flush *and* update its own flushing lists. */
561203b705cfSriastradh	if (!__kgem_busy(kgem, bo->handle))
561303b705cfSriastradh		__kgem_bo_clear_busy(bo);
561403b705cfSriastradh
561503b705cfSriastradh	DBG(("%s: handle=%d, busy?=%d\n",
561603b705cfSriastradh	     __FUNCTION__, bo->handle, bo->rq != NULL));
561703b705cfSriastradh}
561803b705cfSriastradh
561903b705cfSriastradhvoid kgem_scanout_flush(struct kgem *kgem, struct kgem_bo *bo)
562003b705cfSriastradh{
562103b705cfSriastradh	if (!bo->needs_flush)
562203b705cfSriastradh		return;
562303b705cfSriastradh
56249a906b70Schristos	kgem_bo_submit(kgem, bo);
56259a906b70Schristos
562603b705cfSriastradh	/* If the kernel fails to emit the flush, then it will be forced when
562703b705cfSriastradh	 * we assume direct access. And as the usual failure is EIO, we do
562803b705cfSriastradh	 * not actually care.
562903b705cfSriastradh	 */
563003b705cfSriastradh	assert(bo->exec == NULL);
563103b705cfSriastradh	if (bo->rq)
563203b705cfSriastradh		__kgem_flush(kgem, bo);
563303b705cfSriastradh
563403b705cfSriastradh	/* Whatever actually happens, we can regard the GTT write domain
563503b705cfSriastradh	 * as being flushed.
563603b705cfSriastradh	 */
563703b705cfSriastradh	bo->gtt_dirty = false;
563803b705cfSriastradh	bo->needs_flush = false;
563903b705cfSriastradh	bo->domain = DOMAIN_NONE;
564003b705cfSriastradh}
564103b705cfSriastradh
56429a906b70Schristosinline static bool nearly_idle(struct kgem *kgem)
56439a906b70Schristos{
56449a906b70Schristos	int ring = kgem->ring == KGEM_BLT;
56459a906b70Schristos
56469a906b70Schristos	if (list_is_singular(&kgem->requests[ring]))
56479a906b70Schristos		return true;
56489a906b70Schristos
56499a906b70Schristos	return __kgem_ring_is_idle(kgem, ring);
56509a906b70Schristos}
56519a906b70Schristos
565203b705cfSriastradhinline static bool needs_semaphore(struct kgem *kgem, struct kgem_bo *bo)
565303b705cfSriastradh{
56549a906b70Schristos	if (kgem->needs_semaphore)
56559a906b70Schristos		return false;
56569a906b70Schristos
56579a906b70Schristos	if (bo->rq == NULL || RQ_RING(bo->rq) == kgem->ring)
56589a906b70Schristos		return false;
56599a906b70Schristos
56609a906b70Schristos	kgem->needs_semaphore = true;
56619a906b70Schristos	return true;
56629a906b70Schristos}
56639a906b70Schristos
56649a906b70Schristosinline static bool needs_reservation(struct kgem *kgem, struct kgem_bo *bo)
56659a906b70Schristos{
56669a906b70Schristos	if (kgem->needs_reservation)
56679a906b70Schristos		return false;
56689a906b70Schristos
56699a906b70Schristos	if (bo->presumed_offset)
56709a906b70Schristos		return false;
56719a906b70Schristos
56729a906b70Schristos	kgem->needs_reservation = true;
56739a906b70Schristos	return nearly_idle(kgem);
56749a906b70Schristos}
56759a906b70Schristos
56769a906b70Schristosinline static bool needs_batch_flush(struct kgem *kgem, struct kgem_bo *bo)
56779a906b70Schristos{
56789a906b70Schristos	bool flush = false;
56799a906b70Schristos
56809a906b70Schristos	if (needs_semaphore(kgem, bo)) {
56819a906b70Schristos		DBG(("%s: flushing before handle=%d for required semaphore\n", __FUNCTION__, bo->handle));
56829a906b70Schristos		flush = true;
56839a906b70Schristos	}
56849a906b70Schristos
56859a906b70Schristos	if (needs_reservation(kgem, bo)) {
56869a906b70Schristos		DBG(("%s: flushing before handle=%d for new reservation\n", __FUNCTION__, bo->handle));
56879a906b70Schristos		flush = true;
56889a906b70Schristos	}
56899a906b70Schristos
56909a906b70Schristos	return kgem->nreloc ? flush : false;
56919a906b70Schristos}
56929a906b70Schristos
56939a906b70Schristosstatic bool aperture_check(struct kgem *kgem, unsigned num_pages)
56949a906b70Schristos{
56959a906b70Schristos	struct drm_i915_gem_get_aperture aperture;
56969a906b70Schristos	int reserve;
56979a906b70Schristos
56989a906b70Schristos	if (kgem->aperture)
56999a906b70Schristos		return false;
57009a906b70Schristos
57019a906b70Schristos	/* Leave some space in case of alignment issues */
57029a906b70Schristos	reserve = kgem->aperture_mappable / 2;
57039a906b70Schristos	if (kgem->gen < 033 && reserve < kgem->aperture_max_fence)
57049a906b70Schristos		reserve = kgem->aperture_max_fence;
57059a906b70Schristos	if (!kgem->has_llc)
57069a906b70Schristos		reserve += kgem->nexec * PAGE_SIZE * 2;
57079a906b70Schristos
57089a906b70Schristos	DBG(("%s: num_pages=%d, holding %d pages in reserve, total aperture %d\n",
57099a906b70Schristos	     __FUNCTION__, num_pages, reserve, kgem->aperture_total));
57109a906b70Schristos	num_pages += reserve;
57119a906b70Schristos
57129a906b70Schristos	VG_CLEAR(aperture);
57139a906b70Schristos	aperture.aper_available_size = kgem->aperture_total;
57149a906b70Schristos	aperture.aper_available_size *= PAGE_SIZE;
57159a906b70Schristos	(void)do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
57169a906b70Schristos
57179a906b70Schristos	DBG(("%s: aperture required %ld bytes, available %ld bytes\n",
57189a906b70Schristos	     __FUNCTION__,
57199a906b70Schristos	     (long)num_pages * PAGE_SIZE,
57209a906b70Schristos	     (long)aperture.aper_available_size));
57219a906b70Schristos
57229a906b70Schristos	return num_pages <= aperture.aper_available_size / PAGE_SIZE;
57239a906b70Schristos}
57249a906b70Schristos
57259a906b70Schristosstatic inline bool kgem_flush(struct kgem *kgem, bool flush)
57269a906b70Schristos{
57279a906b70Schristos	if (unlikely(kgem->wedged))
57289a906b70Schristos		return false;
57299a906b70Schristos
57309a906b70Schristos	if (kgem->nreloc == 0)
57319a906b70Schristos		return true;
57329a906b70Schristos
57339a906b70Schristos	if (container_of(kgem, struct sna, kgem)->flags & SNA_POWERSAVE)
57349a906b70Schristos		return true;
57359a906b70Schristos
57369a906b70Schristos	if (kgem->flush == flush && kgem->aperture < kgem->aperture_low)
57379a906b70Schristos		return true;
57389a906b70Schristos
57399a906b70Schristos	DBG(("%s: opportunistic flushing? flush=%d,%d, aperture=%d/%d, idle?=%d\n",
57409a906b70Schristos	     __FUNCTION__, kgem->flush, flush, kgem->aperture, kgem->aperture_low, kgem_ring_is_idle(kgem, kgem->ring)));
57419a906b70Schristos	return !kgem_ring_is_idle(kgem, kgem->ring);
574203b705cfSriastradh}
574303b705cfSriastradh
574403b705cfSriastradhbool kgem_check_bo(struct kgem *kgem, ...)
574503b705cfSriastradh{
574603b705cfSriastradh	va_list ap;
574703b705cfSriastradh	struct kgem_bo *bo;
574803b705cfSriastradh	int num_exec = 0;
574903b705cfSriastradh	int num_pages = 0;
575003b705cfSriastradh	bool flush = false;
57519a906b70Schristos	bool busy = true;
575203b705cfSriastradh
575303b705cfSriastradh	va_start(ap, kgem);
575403b705cfSriastradh	while ((bo = va_arg(ap, struct kgem_bo *))) {
575503b705cfSriastradh		while (bo->proxy)
575603b705cfSriastradh			bo = bo->proxy;
575703b705cfSriastradh		if (bo->exec)
575803b705cfSriastradh			continue;
575903b705cfSriastradh
57609a906b70Schristos		if (needs_batch_flush(kgem, bo)) {
57619a906b70Schristos			va_end(ap);
576203b705cfSriastradh			return false;
57639a906b70Schristos		}
576403b705cfSriastradh
576503b705cfSriastradh		num_pages += num_pages(bo);
576603b705cfSriastradh		num_exec++;
576703b705cfSriastradh
576803b705cfSriastradh		flush |= bo->flush;
57699a906b70Schristos		busy &= bo->rq != NULL;
577003b705cfSriastradh	}
577103b705cfSriastradh	va_end(ap);
577203b705cfSriastradh
577303b705cfSriastradh	DBG(("%s: num_pages=+%d, num_exec=+%d\n",
577403b705cfSriastradh	     __FUNCTION__, num_pages, num_exec));
577503b705cfSriastradh
577603b705cfSriastradh	if (!num_pages)
577703b705cfSriastradh		return true;
577803b705cfSriastradh
57799a906b70Schristos	if (kgem->nexec + num_exec >= KGEM_EXEC_SIZE(kgem)) {
57809a906b70Schristos		DBG(("%s: out of exec slots (%d + %d / %d)\n", __FUNCTION__,
57819a906b70Schristos		     kgem->nexec, num_exec, KGEM_EXEC_SIZE(kgem)));
578203b705cfSriastradh		return false;
578303b705cfSriastradh	}
578403b705cfSriastradh
578503b705cfSriastradh	if (num_pages + kgem->aperture > kgem->aperture_high) {
57869a906b70Schristos		DBG(("%s: final aperture usage (%d + %d) is greater than high water mark (%d)\n",
57879a906b70Schristos		     __FUNCTION__, kgem->aperture, num_pages, kgem->aperture_high));
57889a906b70Schristos		return aperture_check(kgem, num_pages);
578903b705cfSriastradh	}
579003b705cfSriastradh
57919a906b70Schristos	if (busy)
57929a906b70Schristos		return true;
579303b705cfSriastradh
57949a906b70Schristos	return kgem_flush(kgem, flush);
579503b705cfSriastradh}
579603b705cfSriastradh
579703b705cfSriastradhbool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo)
579803b705cfSriastradh{
579903b705cfSriastradh	assert(bo->refcnt);
580003b705cfSriastradh	while (bo->proxy)
580103b705cfSriastradh		bo = bo->proxy;
580203b705cfSriastradh	assert(bo->refcnt);
580303b705cfSriastradh
580403b705cfSriastradh	if (bo->exec) {
580503b705cfSriastradh		if (kgem->gen < 040 &&
580603b705cfSriastradh		    bo->tiling != I915_TILING_NONE &&
580703b705cfSriastradh		    (bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0) {
58089a906b70Schristos			uint32_t size;
58099a906b70Schristos
58109a906b70Schristos			assert(bo->tiling == I915_TILING_X);
58119a906b70Schristos
581203b705cfSriastradh			if (kgem->nfence >= kgem->fence_max)
581303b705cfSriastradh				return false;
581403b705cfSriastradh
58159a906b70Schristos			if (kgem->aperture_fenced) {
58169a906b70Schristos				size = 3*kgem->aperture_fenced;
58179a906b70Schristos				if (kgem->aperture_total == kgem->aperture_mappable)
58189a906b70Schristos					size += kgem->aperture;
58199a906b70Schristos				if (size > kgem->aperture_fenceable &&
58209a906b70Schristos				    kgem_ring_is_idle(kgem, kgem->ring)) {
58219a906b70Schristos					DBG(("%s: opportunistic fence flush\n", __FUNCTION__));
58229a906b70Schristos					return false;
58239a906b70Schristos				}
58249a906b70Schristos			}
582503b705cfSriastradh
58269a906b70Schristos			size = kgem_bo_fenced_size(kgem, bo);
58279a906b70Schristos			if (size > kgem->aperture_max_fence)
58289a906b70Schristos				kgem->aperture_max_fence = size;
58299a906b70Schristos			size += kgem->aperture_fenced;
58309a906b70Schristos			if (kgem->gen < 033 && size < 2 * kgem->aperture_max_fence)
58319a906b70Schristos				size = 2 * kgem->aperture_max_fence;
58329a906b70Schristos			if (kgem->aperture_total == kgem->aperture_mappable)
58339a906b70Schristos				size += kgem->aperture;
58349a906b70Schristos			if (size > kgem->aperture_fenceable) {
58359a906b70Schristos				DBG(("%s: estimated fence space required %d (fenced=%d, max_fence=%d, aperture=%d) exceeds fenceable aperture %d\n",
58369a906b70Schristos				     __FUNCTION__, size, kgem->aperture_fenced, kgem->aperture_max_fence, kgem->aperture, kgem->aperture_fenceable));
583703b705cfSriastradh				return false;
58389a906b70Schristos			}
583903b705cfSriastradh		}
584003b705cfSriastradh
584103b705cfSriastradh		return true;
584203b705cfSriastradh	}
584303b705cfSriastradh
584403b705cfSriastradh	if (kgem->nexec >= KGEM_EXEC_SIZE(kgem) - 1)
584503b705cfSriastradh		return false;
584603b705cfSriastradh
58479a906b70Schristos	if (needs_batch_flush(kgem, bo))
584803b705cfSriastradh		return false;
584903b705cfSriastradh
585003b705cfSriastradh	assert_tiling(kgem, bo);
585103b705cfSriastradh	if (kgem->gen < 040 && bo->tiling != I915_TILING_NONE) {
58529a906b70Schristos		uint32_t size;
58539a906b70Schristos
58549a906b70Schristos		assert(bo->tiling == I915_TILING_X);
58559a906b70Schristos
585603b705cfSriastradh		if (kgem->nfence >= kgem->fence_max)
585703b705cfSriastradh			return false;
585803b705cfSriastradh
58599a906b70Schristos		if (kgem->aperture_fenced) {
58609a906b70Schristos			size = 3*kgem->aperture_fenced;
58619a906b70Schristos			if (kgem->aperture_total == kgem->aperture_mappable)
58629a906b70Schristos				size += kgem->aperture;
58639a906b70Schristos			if (size > kgem->aperture_fenceable &&
58649a906b70Schristos			    kgem_ring_is_idle(kgem, kgem->ring)) {
58659a906b70Schristos				DBG(("%s: opportunistic fence flush\n", __FUNCTION__));
58669a906b70Schristos				return false;
58679a906b70Schristos			}
58689a906b70Schristos		}
586903b705cfSriastradh
58709a906b70Schristos		size = kgem_bo_fenced_size(kgem, bo);
58719a906b70Schristos		if (size > kgem->aperture_max_fence)
58729a906b70Schristos			kgem->aperture_max_fence = size;
58739a906b70Schristos		size += kgem->aperture_fenced;
58749a906b70Schristos		if (kgem->gen < 033 && size < 2 * kgem->aperture_max_fence)
58759a906b70Schristos			size = 2 * kgem->aperture_max_fence;
58769a906b70Schristos		if (kgem->aperture_total == kgem->aperture_mappable)
58779a906b70Schristos			size += kgem->aperture;
58789a906b70Schristos		if (size > kgem->aperture_fenceable) {
58799a906b70Schristos			DBG(("%s: estimated fence space required %d (fenced=%d, max_fence=%d, aperture=%d) exceeds fenceable aperture %d\n",
58809a906b70Schristos			     __FUNCTION__, size, kgem->aperture_fenced, kgem->aperture_max_fence, kgem->aperture, kgem->aperture_fenceable));
588103b705cfSriastradh			return false;
58829a906b70Schristos		}
588303b705cfSriastradh	}
588403b705cfSriastradh
58859a906b70Schristos	if (kgem->aperture + kgem->aperture_fenced + num_pages(bo) > kgem->aperture_high) {
58869a906b70Schristos		DBG(("%s: final aperture usage (%d + %d) is greater than high water mark (%d)\n",
58879a906b70Schristos		     __FUNCTION__, kgem->aperture, num_pages(bo), kgem->aperture_high));
58889a906b70Schristos		return aperture_check(kgem, num_pages(bo));
58899a906b70Schristos	}
58909a906b70Schristos
58919a906b70Schristos	if (bo->rq)
58929a906b70Schristos		return true;
58939a906b70Schristos
58949a906b70Schristos	return kgem_flush(kgem, bo->flush);
589503b705cfSriastradh}
589603b705cfSriastradh
589703b705cfSriastradhbool kgem_check_many_bo_fenced(struct kgem *kgem, ...)
589803b705cfSriastradh{
589903b705cfSriastradh	va_list ap;
590003b705cfSriastradh	struct kgem_bo *bo;
590103b705cfSriastradh	int num_fence = 0;
590203b705cfSriastradh	int num_exec = 0;
590303b705cfSriastradh	int num_pages = 0;
590403b705cfSriastradh	int fenced_size = 0;
590503b705cfSriastradh	bool flush = false;
59069a906b70Schristos	bool busy = true;
590703b705cfSriastradh
590803b705cfSriastradh	va_start(ap, kgem);
590903b705cfSriastradh	while ((bo = va_arg(ap, struct kgem_bo *))) {
591003b705cfSriastradh		assert(bo->refcnt);
591103b705cfSriastradh		while (bo->proxy)
591203b705cfSriastradh			bo = bo->proxy;
591303b705cfSriastradh		assert(bo->refcnt);
591403b705cfSriastradh		if (bo->exec) {
591503b705cfSriastradh			if (kgem->gen >= 040 || bo->tiling == I915_TILING_NONE)
591603b705cfSriastradh				continue;
591703b705cfSriastradh
591803b705cfSriastradh			if ((bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0) {
591903b705cfSriastradh				fenced_size += kgem_bo_fenced_size(kgem, bo);
592003b705cfSriastradh				num_fence++;
592103b705cfSriastradh			}
592203b705cfSriastradh
592303b705cfSriastradh			continue;
592403b705cfSriastradh		}
592503b705cfSriastradh
59269a906b70Schristos		if (needs_batch_flush(kgem, bo)) {
59279a906b70Schristos			va_end(ap);
592803b705cfSriastradh			return false;
59299a906b70Schristos		}
593003b705cfSriastradh
593103b705cfSriastradh		assert_tiling(kgem, bo);
593203b705cfSriastradh		num_pages += num_pages(bo);
593303b705cfSriastradh		num_exec++;
593403b705cfSriastradh		if (kgem->gen < 040 && bo->tiling) {
59359a906b70Schristos			uint32_t size = kgem_bo_fenced_size(kgem, bo);
59369a906b70Schristos			if (size > kgem->aperture_max_fence)
59379a906b70Schristos				kgem->aperture_max_fence = size;
59389a906b70Schristos			fenced_size += size;
593903b705cfSriastradh			num_fence++;
594003b705cfSriastradh		}
594103b705cfSriastradh
594203b705cfSriastradh		flush |= bo->flush;
59439a906b70Schristos		busy &= bo->rq != NULL;
594403b705cfSriastradh	}
594503b705cfSriastradh	va_end(ap);
594603b705cfSriastradh
594703b705cfSriastradh	if (num_fence) {
59489a906b70Schristos		uint32_t size;
59499a906b70Schristos
595003b705cfSriastradh		if (kgem->nfence + num_fence > kgem->fence_max)
595103b705cfSriastradh			return false;
595203b705cfSriastradh
59539a906b70Schristos		if (kgem->aperture_fenced) {
59549a906b70Schristos			size = 3*kgem->aperture_fenced;
59559a906b70Schristos			if (kgem->aperture_total == kgem->aperture_mappable)
59569a906b70Schristos				size += kgem->aperture;
59579a906b70Schristos			if (size > kgem->aperture_fenceable &&
59589a906b70Schristos			    kgem_ring_is_idle(kgem, kgem->ring)) {
59599a906b70Schristos				DBG(("%s: opportunistic fence flush\n", __FUNCTION__));
59609a906b70Schristos				return false;
59619a906b70Schristos			}
59629a906b70Schristos		}
596303b705cfSriastradh
59649a906b70Schristos		size = kgem->aperture_fenced;
59659a906b70Schristos		size += fenced_size;
59669a906b70Schristos		if (kgem->gen < 033 && size < 2 * kgem->aperture_max_fence)
59679a906b70Schristos			size = 2 * kgem->aperture_max_fence;
59689a906b70Schristos		if (kgem->aperture_total == kgem->aperture_mappable)
59699a906b70Schristos			size += kgem->aperture;
59709a906b70Schristos		if (size > kgem->aperture_fenceable) {
59719a906b70Schristos			DBG(("%s: estimated fence space required %d (fenced=%d, max_fence=%d, aperture=%d) exceeds fenceable aperture %d\n",
59729a906b70Schristos			     __FUNCTION__, size, kgem->aperture_fenced, kgem->aperture_max_fence, kgem->aperture, kgem->aperture_fenceable));
597303b705cfSriastradh			return false;
59749a906b70Schristos		}
597503b705cfSriastradh	}
597603b705cfSriastradh
59779a906b70Schristos	if (num_pages == 0)
59789a906b70Schristos		return true;
597903b705cfSriastradh
59809a906b70Schristos	if (kgem->nexec + num_exec >= KGEM_EXEC_SIZE(kgem))
59819a906b70Schristos		return false;
598203b705cfSriastradh
59839a906b70Schristos	if (num_pages + kgem->aperture > kgem->aperture_high - kgem->aperture_fenced) {
59849a906b70Schristos		DBG(("%s: final aperture usage (%d + %d + %d) is greater than high water mark (%d)\n",
59859a906b70Schristos		     __FUNCTION__, kgem->aperture, kgem->aperture_fenced, num_pages, kgem->aperture_high));
59869a906b70Schristos		return aperture_check(kgem, num_pages);
598703b705cfSriastradh	}
598803b705cfSriastradh
59899a906b70Schristos	if (busy)
59909a906b70Schristos		return true;
59919a906b70Schristos
59929a906b70Schristos	return kgem_flush(kgem, flush);
599303b705cfSriastradh}
599403b705cfSriastradh
599503b705cfSriastradhuint32_t kgem_add_reloc(struct kgem *kgem,
599603b705cfSriastradh			uint32_t pos,
599703b705cfSriastradh			struct kgem_bo *bo,
599803b705cfSriastradh			uint32_t read_write_domain,
599903b705cfSriastradh			uint32_t delta)
600003b705cfSriastradh{
600103b705cfSriastradh	int index;
600203b705cfSriastradh
600303b705cfSriastradh	DBG(("%s: handle=%d, pos=%d, delta=%d, domains=%08x\n",
600403b705cfSriastradh	     __FUNCTION__, bo ? bo->handle : 0, pos, delta, read_write_domain));
600503b705cfSriastradh
60069a906b70Schristos	assert(kgem->gen < 0100);
600703b705cfSriastradh	assert((read_write_domain & 0x7fff) == 0 || bo != NULL);
600803b705cfSriastradh
600903b705cfSriastradh	index = kgem->nreloc++;
601003b705cfSriastradh	assert(index < ARRAY_SIZE(kgem->reloc));
601103b705cfSriastradh	kgem->reloc[index].offset = pos * sizeof(kgem->batch[0]);
601203b705cfSriastradh	if (bo) {
60139a906b70Schristos		assert(kgem->mode != KGEM_NONE);
601403b705cfSriastradh		assert(bo->refcnt);
601503b705cfSriastradh		while (bo->proxy) {
601603b705cfSriastradh			DBG(("%s: adding proxy [delta=%d] for handle=%d\n",
601703b705cfSriastradh			     __FUNCTION__, bo->delta, bo->handle));
601803b705cfSriastradh			delta += bo->delta;
601903b705cfSriastradh			assert(bo->handle == bo->proxy->handle);
602003b705cfSriastradh			/* need to release the cache upon batch submit */
602103b705cfSriastradh			if (bo->exec == NULL) {
602203b705cfSriastradh				list_move_tail(&bo->request,
602303b705cfSriastradh					       &kgem->next_request->buffers);
602403b705cfSriastradh				bo->rq = MAKE_REQUEST(kgem->next_request,
602503b705cfSriastradh						      kgem->ring);
602603b705cfSriastradh				bo->exec = &_kgem_dummy_exec;
60279a906b70Schristos				bo->domain = DOMAIN_GPU;
602803b705cfSriastradh			}
602903b705cfSriastradh
603003b705cfSriastradh			if (read_write_domain & 0x7fff && !bo->gpu_dirty)
603103b705cfSriastradh				__kgem_bo_mark_dirty(bo);
603203b705cfSriastradh
603303b705cfSriastradh			bo = bo->proxy;
603403b705cfSriastradh			assert(bo->refcnt);
603503b705cfSriastradh		}
603603b705cfSriastradh		assert(bo->refcnt);
603703b705cfSriastradh
603803b705cfSriastradh		if (bo->exec == NULL)
603903b705cfSriastradh			kgem_add_bo(kgem, bo);
604003b705cfSriastradh		assert(bo->rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
604103b705cfSriastradh		assert(RQ_RING(bo->rq) == kgem->ring);
604203b705cfSriastradh
604303b705cfSriastradh		if (kgem->gen < 040 && read_write_domain & KGEM_RELOC_FENCED) {
604403b705cfSriastradh			if (bo->tiling &&
604503b705cfSriastradh			    (bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0) {
60469a906b70Schristos				assert(bo->tiling == I915_TILING_X);
604703b705cfSriastradh				assert(kgem->nfence < kgem->fence_max);
604803b705cfSriastradh				kgem->aperture_fenced +=
604903b705cfSriastradh					kgem_bo_fenced_size(kgem, bo);
605003b705cfSriastradh				kgem->nfence++;
605103b705cfSriastradh			}
605203b705cfSriastradh			bo->exec->flags |= EXEC_OBJECT_NEEDS_FENCE;
605303b705cfSriastradh		}
605403b705cfSriastradh
605503b705cfSriastradh		kgem->reloc[index].delta = delta;
605603b705cfSriastradh		kgem->reloc[index].target_handle = bo->target_handle;
605703b705cfSriastradh		kgem->reloc[index].presumed_offset = bo->presumed_offset;
605803b705cfSriastradh
605903b705cfSriastradh		if (read_write_domain & 0x7fff && !bo->gpu_dirty) {
606003b705cfSriastradh			assert(!bo->snoop || kgem->can_blt_cpu);
606103b705cfSriastradh			__kgem_bo_mark_dirty(bo);
606203b705cfSriastradh		}
606303b705cfSriastradh
606403b705cfSriastradh		delta += bo->presumed_offset;
606503b705cfSriastradh	} else {
606603b705cfSriastradh		kgem->reloc[index].delta = delta;
606703b705cfSriastradh		kgem->reloc[index].target_handle = ~0U;
606803b705cfSriastradh		kgem->reloc[index].presumed_offset = 0;
606903b705cfSriastradh		if (kgem->nreloc__self < 256)
607003b705cfSriastradh			kgem->reloc__self[kgem->nreloc__self++] = index;
607103b705cfSriastradh	}
607203b705cfSriastradh	kgem->reloc[index].read_domains = read_write_domain >> 16;
607303b705cfSriastradh	kgem->reloc[index].write_domain = read_write_domain & 0x7fff;
607403b705cfSriastradh
607503b705cfSriastradh	return delta;
607603b705cfSriastradh}
607703b705cfSriastradh
60789a906b70Schristosuint64_t kgem_add_reloc64(struct kgem *kgem,
60799a906b70Schristos			  uint32_t pos,
60809a906b70Schristos			  struct kgem_bo *bo,
60819a906b70Schristos			  uint32_t read_write_domain,
60829a906b70Schristos			  uint64_t delta)
60839a906b70Schristos{
60849a906b70Schristos	int index;
60859a906b70Schristos
60869a906b70Schristos	DBG(("%s: handle=%d, pos=%d, delta=%ld, domains=%08x\n",
60879a906b70Schristos	     __FUNCTION__, bo ? bo->handle : 0, pos, (long)delta, read_write_domain));
60889a906b70Schristos
60899a906b70Schristos	assert(kgem->gen >= 0100);
60909a906b70Schristos	assert((read_write_domain & 0x7fff) == 0 || bo != NULL);
60919a906b70Schristos
60929a906b70Schristos	index = kgem->nreloc++;
60939a906b70Schristos	assert(index < ARRAY_SIZE(kgem->reloc));
60949a906b70Schristos	kgem->reloc[index].offset = pos * sizeof(kgem->batch[0]);
60959a906b70Schristos	if (bo) {
60969a906b70Schristos		assert(kgem->mode != KGEM_NONE);
60979a906b70Schristos		assert(bo->refcnt);
60989a906b70Schristos		while (bo->proxy) {
60999a906b70Schristos			DBG(("%s: adding proxy [delta=%ld] for handle=%d\n",
61009a906b70Schristos			     __FUNCTION__, (long)bo->delta, bo->handle));
61019a906b70Schristos			delta += bo->delta;
61029a906b70Schristos			assert(bo->handle == bo->proxy->handle);
61039a906b70Schristos			/* need to release the cache upon batch submit */
61049a906b70Schristos			if (bo->exec == NULL) {
61059a906b70Schristos				list_move_tail(&bo->request,
61069a906b70Schristos					       &kgem->next_request->buffers);
61079a906b70Schristos				bo->rq = MAKE_REQUEST(kgem->next_request,
61089a906b70Schristos						      kgem->ring);
61099a906b70Schristos				bo->exec = &_kgem_dummy_exec;
61109a906b70Schristos				bo->domain = DOMAIN_GPU;
61119a906b70Schristos			}
61129a906b70Schristos
61139a906b70Schristos			if (read_write_domain & 0x7fff && !bo->gpu_dirty)
61149a906b70Schristos				__kgem_bo_mark_dirty(bo);
61159a906b70Schristos
61169a906b70Schristos			bo = bo->proxy;
61179a906b70Schristos			assert(bo->refcnt);
61189a906b70Schristos		}
61199a906b70Schristos		assert(bo->refcnt);
61209a906b70Schristos
61219a906b70Schristos		if (bo->exec == NULL)
61229a906b70Schristos			kgem_add_bo(kgem, bo);
61239a906b70Schristos		assert(bo->rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
61249a906b70Schristos		assert(RQ_RING(bo->rq) == kgem->ring);
61259a906b70Schristos
61269a906b70Schristos		DBG(("%s[%d] = (delta=%d, target handle=%d, presumed=%llx)\n",
61279a906b70Schristos					__FUNCTION__, index, delta, bo->target_handle, (long long)bo->presumed_offset));
61289a906b70Schristos		kgem->reloc[index].delta = delta;
61299a906b70Schristos		kgem->reloc[index].target_handle = bo->target_handle;
61309a906b70Schristos		kgem->reloc[index].presumed_offset = bo->presumed_offset;
61319a906b70Schristos
61329a906b70Schristos		if (read_write_domain & 0x7fff && !bo->gpu_dirty) {
61339a906b70Schristos			assert(!bo->snoop || kgem->can_blt_cpu);
61349a906b70Schristos			__kgem_bo_mark_dirty(bo);
61359a906b70Schristos		}
61369a906b70Schristos
61379a906b70Schristos		delta += bo->presumed_offset;
61389a906b70Schristos	} else {
61399a906b70Schristos		DBG(("%s[%d] = (delta=%d, target handle=batch)\n",
61409a906b70Schristos					__FUNCTION__, index, delta));
61419a906b70Schristos		kgem->reloc[index].delta = delta;
61429a906b70Schristos		kgem->reloc[index].target_handle = ~0U;
61439a906b70Schristos		kgem->reloc[index].presumed_offset = 0;
61449a906b70Schristos		if (kgem->nreloc__self < 256)
61459a906b70Schristos			kgem->reloc__self[kgem->nreloc__self++] = index;
61469a906b70Schristos	}
61479a906b70Schristos	kgem->reloc[index].read_domains = read_write_domain >> 16;
61489a906b70Schristos	kgem->reloc[index].write_domain = read_write_domain & 0x7fff;
61499a906b70Schristos
61509a906b70Schristos	return delta;
61519a906b70Schristos}
61529a906b70Schristos
615303b705cfSriastradhstatic void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
615403b705cfSriastradh{
615503b705cfSriastradh	int i, j;
615603b705cfSriastradh
615703b705cfSriastradh	DBG(("%s: type=%d, count=%d (bucket: %d)\n",
615803b705cfSriastradh	     __FUNCTION__, type, kgem->vma[type].count, bucket));
615903b705cfSriastradh	if (kgem->vma[type].count <= 0)
616003b705cfSriastradh	       return;
616103b705cfSriastradh
616203b705cfSriastradh	if (kgem->need_purge)
616303b705cfSriastradh		kgem_purge_cache(kgem);
616403b705cfSriastradh
616503b705cfSriastradh	/* vma are limited on a per-process basis to around 64k.
616603b705cfSriastradh	 * This includes all malloc arenas as well as other file
616703b705cfSriastradh	 * mappings. In order to be fair and not hog the cache,
616803b705cfSriastradh	 * and more importantly not to exhaust that limit and to
616903b705cfSriastradh	 * start failing mappings, we keep our own number of open
617003b705cfSriastradh	 * vma to within a conservative value.
617103b705cfSriastradh	 */
617203b705cfSriastradh	i = 0;
617303b705cfSriastradh	while (kgem->vma[type].count > 0) {
617403b705cfSriastradh		struct kgem_bo *bo = NULL;
617503b705cfSriastradh
617603b705cfSriastradh		for (j = 0;
617703b705cfSriastradh		     bo == NULL && j < ARRAY_SIZE(kgem->vma[type].inactive);
617803b705cfSriastradh		     j++) {
617903b705cfSriastradh			struct list *head = &kgem->vma[type].inactive[i++%ARRAY_SIZE(kgem->vma[type].inactive)];
618003b705cfSriastradh			if (!list_is_empty(head))
618103b705cfSriastradh				bo = list_last_entry(head, struct kgem_bo, vma);
618203b705cfSriastradh		}
618303b705cfSriastradh		if (bo == NULL)
618403b705cfSriastradh			break;
618503b705cfSriastradh
618603b705cfSriastradh		DBG(("%s: discarding inactive %s vma cache for %d\n",
61879a906b70Schristos		     __FUNCTION__, type ? "CPU" : "GTT", bo->handle));
61889a906b70Schristos
618903b705cfSriastradh		assert(bo->rq == NULL);
6190813957e3Ssnj		if (type) {
6191813957e3Ssnj			VG(VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map__cpu), bytes(bo)));
6192813957e3Ssnj			munmap(MAP(bo->map__cpu), bytes(bo));
6193813957e3Ssnj			bo->map__cpu = NULL;
6194813957e3Ssnj		} else {
6195813957e3Ssnj			if (bo->map__wc) {
6196813957e3Ssnj				VG(VALGRIND_MAKE_MEM_NOACCESS(bo->map__wc, bytes(bo)));
6197813957e3Ssnj				munmap(bo->map__wc, bytes(bo));
6198813957e3Ssnj				bo->map__wc = NULL;
6199813957e3Ssnj			}
6200813957e3Ssnj			if (bo->map__gtt) {
6201813957e3Ssnj				munmap(bo->map__gtt, bytes(bo));
6202813957e3Ssnj				bo->map__gtt = NULL;
6203813957e3Ssnj			}
6204813957e3Ssnj		}
620503b705cfSriastradh
620603b705cfSriastradh		list_del(&bo->vma);
620703b705cfSriastradh		kgem->vma[type].count--;
620803b705cfSriastradh
620903b705cfSriastradh		if (!bo->purged && !kgem_bo_set_purgeable(kgem, bo)) {
621003b705cfSriastradh			DBG(("%s: freeing unpurgeable old mapping\n",
621103b705cfSriastradh			     __FUNCTION__));
621203b705cfSriastradh			kgem_bo_free(kgem, bo);
621303b705cfSriastradh		}
621403b705cfSriastradh	}
621503b705cfSriastradh}
621603b705cfSriastradh
6217813957e3Ssnjstatic void *__kgem_bo_map__gtt_or_wc(struct kgem *kgem, struct kgem_bo *bo)
621803b705cfSriastradh{
621903b705cfSriastradh	void *ptr;
622003b705cfSriastradh
6221813957e3Ssnj	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
6222813957e3Ssnj
6223813957e3Ssnj	assert(bo->proxy == NULL);
6224813957e3Ssnj	assert(!bo->snoop);
6225813957e3Ssnj
6226813957e3Ssnj	kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
6227813957e3Ssnj
6228813957e3Ssnj	if (bo->tiling || !kgem->has_wc_mmap) {
6229813957e3Ssnj		assert(num_pages(bo) <= kgem->aperture_mappable / 2);
6230813957e3Ssnj		assert(kgem->gen != 021 || bo->tiling != I915_TILING_Y);
6231813957e3Ssnj
6232813957e3Ssnj		ptr = bo->map__gtt;
6233813957e3Ssnj		if (ptr == NULL)
6234813957e3Ssnj			ptr = __kgem_bo_map__gtt(kgem, bo);
6235813957e3Ssnj	} else {
6236813957e3Ssnj		ptr = bo->map__wc;
6237813957e3Ssnj		if (ptr == NULL)
6238813957e3Ssnj			ptr = __kgem_bo_map__wc(kgem, bo);
6239813957e3Ssnj	}
6240813957e3Ssnj
6241813957e3Ssnj	return ptr;
6242813957e3Ssnj}
6243813957e3Ssnj
/* Map a bo for CPU access without issuing a set-domain ioctl: unlike
 * kgem_bo_map(), no synchronisation against outstanding GPU work is
 * performed (hence "async"); the caller takes responsibility for any
 * required ordering. */
void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, offset=%ld, tiling=%d, map=%p:%p, domain=%d\n", __FUNCTION__,
	     bo->handle, (long)bo->presumed_offset, bo->tiling, bo->map__gtt, bo->map__cpu, bo->domain));

	assert(bo->proxy == NULL);
	assert(list_is_empty(&bo->list));
	assert_tiling(kgem, bo);
	assert(!bo->purged || bo->reusable);

	/* On LLC hardware an untiled, non-scanout bo can be served by a
	 * plain CPU mmap instead of consuming aperture space. */
	if (bo->tiling == I915_TILING_NONE && !bo->scanout && kgem->has_llc) {
		DBG(("%s: converting request for GTT map into CPU map\n",
		     __FUNCTION__));
		return kgem_bo_map__cpu(kgem, bo);
	}

	return __kgem_bo_map__gtt_or_wc(kgem, bo);
}
626203b705cfSriastradh
/* Map a bo for synchronous CPU access.  Linear buffers that are coherent
 * (LLC, or already resident in the CPU domain) use a CPU mmap; everything
 * else is mapped through the GTT/WC path and then moved into the GTT
 * domain so CPU accesses are ordered against the GPU. */
void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
{
	void *ptr;

	DBG(("%s: handle=%d, offset=%ld, tiling=%d, map=%p:%p, domain=%d\n", __FUNCTION__,
	     bo->handle, (long)bo->presumed_offset, bo->tiling, bo->map__gtt, bo->map__cpu, bo->domain));

	assert(bo->proxy == NULL);
	assert(list_is_empty(&bo->list));
	assert(bo->exec == NULL);
	assert_tiling(kgem, bo);
	assert(!bo->purged || bo->reusable);

	if (bo->tiling == I915_TILING_NONE && !bo->scanout &&
	    (kgem->has_llc || bo->domain == DOMAIN_CPU)) {
		DBG(("%s: converting request for GTT map into CPU map\n",
		     __FUNCTION__));
		ptr = kgem_bo_map__cpu(kgem, bo);
		/* Only synchronise if the CPU map actually succeeded. */
		if (ptr)
			kgem_bo_sync__cpu(kgem, bo);
		return ptr;
	}

	ptr = __kgem_bo_map__gtt_or_wc(kgem, bo);

	/* Move the bo into the GTT domain unless it is already there (or a
	 * forced sync is requested via FORCE_MMAP_SYNC). */
	if (bo->domain != DOMAIN_GTT || FORCE_MMAP_SYNC & (1 << DOMAIN_GTT)) {
		struct drm_i915_gem_set_domain set_domain;

		DBG(("%s: sync: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
		     bo->needs_flush, bo->domain, __kgem_busy(kgem, bo->handle)));

		/* XXX use PROT_READ to avoid the write flush? */

		VG_CLEAR(set_domain);
		set_domain.handle = bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
		if (do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
			/* set-domain failing is treated as a GPU hang; throttle
			 * and carry on with the (possibly stale) mapping. */
			DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
			kgem_throttle(kgem);
		}
		kgem_bo_retire(kgem, bo);
		bo->domain = DOMAIN_GTT;
		bo->gtt_dirty = true;
	}

	return ptr;
}
631103b705cfSriastradh
/* Map a bo through the GTT aperture (or WC mmap when eligible) without
 * changing its domain; thin wrapper around __kgem_bo_map__gtt_or_wc(). */
void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, offset=%ld, tiling=%d, map=%p:%p, domain=%d\n", __FUNCTION__,
	     bo->handle, (long)bo->presumed_offset, bo->tiling, bo->map__gtt, bo->map__cpu, bo->domain));

	assert(bo->proxy == NULL);
	assert(bo->exec == NULL);
	assert(list_is_empty(&bo->list));
	assert_tiling(kgem, bo);
	assert(!bo->purged || bo->reusable);

	return __kgem_bo_map__gtt_or_wc(kgem, bo);
}
632503b705cfSriastradh
6326813957e3Ssnjvoid *kgem_bo_map__wc(struct kgem *kgem, struct kgem_bo *bo)
6327813957e3Ssnj{
6328813957e3Ssnj	DBG(("%s: handle=%d, offset=%ld, tiling=%d, map=%p:%p, domain=%d\n", __FUNCTION__,
6329813957e3Ssnj	     bo->handle, (long)bo->presumed_offset, bo->tiling, bo->map__gtt, bo->map__cpu, bo->domain));
633003b705cfSriastradh
6331813957e3Ssnj	assert(bo->proxy == NULL);
6332813957e3Ssnj	assert(bo->exec == NULL);
6333813957e3Ssnj	assert(list_is_empty(&bo->list));
6334813957e3Ssnj	assert_tiling(kgem, bo);
6335813957e3Ssnj	assert(!bo->purged || bo->reusable);
633603b705cfSriastradh
6337813957e3Ssnj	if (bo->map__wc)
6338813957e3Ssnj		return bo->map__wc;
633903b705cfSriastradh
6340813957e3Ssnj	return __kgem_bo_map__wc(kgem, bo);
634103b705cfSriastradh}
634203b705cfSriastradh
/* Return a plain CPU mmap of bo, reusing the cached mapping if present.
 * The vma cache is trimmed first to bound the number of live mappings.
 * No domain synchronisation is performed (see kgem_bo_sync__cpu()). */
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s(handle=%d, size=%d, map=%p:%p)\n",
	     __FUNCTION__, bo->handle, bytes(bo), bo->map__gtt, bo->map__cpu));
	assert(!bo->purged);
	assert(list_is_empty(&bo->list));
	assert(bo->proxy == NULL);
	assert_tiling(kgem, bo);

	/* map__cpu is stored tagged; MAP() strips the tag bits. */
	if (bo->map__cpu)
		return MAP(bo->map__cpu);

	kgem_trim_vma_cache(kgem, MAP_CPU, bucket(bo));

	return __kgem_bo_map__cpu(kgem, bo);
}
635903b705cfSriastradh
6360813957e3Ssnjvoid *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo)
6361813957e3Ssnj{
6362813957e3Ssnj	void *ptr;
636303b705cfSriastradh
6364813957e3Ssnj	if (bo->tiling == I915_TILING_NONE && kgem->has_llc) {
6365813957e3Ssnj		ptr = MAP(bo->map__cpu);
6366813957e3Ssnj		if (ptr == NULL)
6367813957e3Ssnj			ptr = __kgem_bo_map__cpu(kgem, bo);
6368813957e3Ssnj	} else if (bo->tiling || !kgem->has_wc_mmap) {
6369813957e3Ssnj		ptr = bo->map__gtt;
6370813957e3Ssnj		if (ptr == NULL)
6371813957e3Ssnj			ptr = __kgem_bo_map__gtt(kgem, bo);
6372813957e3Ssnj	} else {
6373813957e3Ssnj		ptr = bo->map__wc;
6374813957e3Ssnj		if (ptr == NULL)
6375813957e3Ssnj			ptr = __kgem_bo_map__wc(kgem, bo);
637603b705cfSriastradh	}
637703b705cfSriastradh
6378813957e3Ssnj	return ptr;
637903b705cfSriastradh}
638003b705cfSriastradh
6381813957e3Ssnj
/* Export bo as a global GEM name via DRM_IOCTL_GEM_FLINK.
 * Returns the flink name, or 0 on failure.  The bo is marked
 * non-reusable since another process may now hold a reference. */
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo)
{
	struct drm_gem_flink flink;

	VG_CLEAR(flink);
	flink.handle = bo->handle;
	if (do_ioctl(kgem->fd, DRM_IOCTL_GEM_FLINK, &flink))
		return 0;

	DBG(("%s: flinked handle=%d to name=%d, marking non-reusable\n",
	     __FUNCTION__, flink.handle, flink.name));

	/* Ordinarily giving the name away makes the buffer non-reusable.
	 * However, we track the lifetime of all clients and their hold
	 * on the buffer, and *presuming* they do not pass it on to a third
	 * party, we track the lifetime accurately.
	 */
	bo->reusable = false;

	/* External access invalidates any cached-clean assumptions. */
	kgem_bo_unclean(kgem, bo);

	return flink.name;
}
640503b705cfSriastradh
/* Wrap an existing user allocation as a GEM bo using userptr.
 *
 * ptr/size need not be page aligned: the span is rounded out to whole
 * pages for the userptr import, and if ptr was not page aligned a proxy
 * bo carrying the subpage offset is returned instead of the base bo.
 * Returns NULL if userptr is unsupported or the import fails.
 */
struct kgem_bo *kgem_create_map(struct kgem *kgem,
				void *ptr, uint32_t size,
				bool read_only)
{
	struct kgem_bo *bo;
	uintptr_t first_page, last_page;
	uint32_t handle;

	/* ptr must not carry any map-tag bits. */
	assert(MAP(ptr) == ptr);

	DBG(("%s(%p size=%d, read-only?=%d) - has_userptr?=%d\n", __FUNCTION__,
	     ptr, size, read_only, kgem->has_userptr));
	if (!kgem->has_userptr)
		return NULL;

	/* Round [ptr, ptr+size) out to page boundaries for userptr. */
	first_page = (uintptr_t)ptr;
	last_page = first_page + size + PAGE_SIZE - 1;

	first_page &= ~(PAGE_SIZE-1);
	last_page &= ~(PAGE_SIZE-1);
	assert(last_page > first_page);

	handle = gem_userptr(kgem->fd,
			     (void *)first_page, last_page-first_page,
			     read_only);
	if (handle == 0) {
		DBG(("%s: import failed, errno=%d\n", __FUNCTION__, errno));
		return NULL;
	}

	bo = __kgem_bo_alloc(handle, (last_page - first_page) / PAGE_SIZE);
	if (bo == NULL) {
		gem_close(kgem->fd, handle);
		return NULL;
	}

	bo->unique_id = kgem_get_unique_id(kgem);
	/* Without LLC, userptr memory must be treated as snooped. */
	bo->snoop = !kgem->has_llc;
	debug_alloc__bo(kgem, bo);

	if (first_page != (uintptr_t)ptr) {
		struct kgem_bo *proxy;

		/* ptr was not page aligned: hand back a proxy that applies
		 * the subpage delta; the proxy holds the only reference. */
		proxy = kgem_create_proxy(kgem, bo,
					  (uintptr_t)ptr - first_page, size);
		kgem_bo_destroy(kgem, bo);
		if (proxy == NULL)
			return NULL;

		bo = proxy;
	}

	/* Tag the mapping as user-owned so kgem never munmaps it. */
	bo->map__cpu = MAKE_USER_MAP(ptr);

	DBG(("%s(ptr=%p, size=%d, pages=%d, read_only=%d) => handle=%d (proxy? %d)\n",
	     __FUNCTION__, ptr, size, NUM_PAGES(size), read_only, handle, bo->proxy != NULL));
	return bo;
}
646403b705cfSriastradh
/* Synchronise bo for read/write CPU access: flush any pending batch that
 * references it, then move it to the CPU domain via set-domain. */
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
	assert(!bo->scanout);
	assert_tiling(kgem, bo);

	/* Submit any batch still referencing this bo before waiting. */
	kgem_bo_submit(kgem, bo);

	/* SHM pixmaps use proxies for subpage offsets */
	assert(!bo->purged);
	while (bo->proxy)
		bo = bo->proxy;
	assert(!bo->purged);

	if (bo->domain != DOMAIN_CPU || FORCE_MMAP_SYNC & (1 << DOMAIN_CPU)) {
		struct drm_i915_gem_set_domain set_domain;

		DBG(("%s: SYNC: handle=%d, needs_flush? %d, domain? %d, busy? %d\n",
		     __FUNCTION__, bo->handle,
		     bo->needs_flush, bo->domain,
		     __kgem_busy(kgem, bo->handle)));

		VG_CLEAR(set_domain);
		set_domain.handle = bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_CPU;
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;

		if (do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
			/* Treat set-domain failure as a GPU hang. */
			DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
			kgem_throttle(kgem);
		}
		kgem_bo_retire(kgem, bo);
		bo->domain = DOMAIN_CPU;
	}
}
650003b705cfSriastradh
/* Synchronise bo for CPU access, distinguishing reads from writes.
 * For a pure read (write=false) the write_domain is left 0 so the kernel
 * need not invalidate other cached copies, and the bo ends in DOMAIN_NONE
 * rather than DOMAIN_CPU. */
void kgem_bo_sync__cpu_full(struct kgem *kgem, struct kgem_bo *bo, bool write)
{
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
	assert(!bo->scanout || !write);
	assert_tiling(kgem, bo);

	/* Reads of an unflushed bo can skip the batch submission. */
	if (write || bo->needs_flush)
		kgem_bo_submit(kgem, bo);

	/* SHM pixmaps use proxies for subpage offsets */
	assert(!bo->purged);
	assert(bo->refcnt);
	while (bo->proxy)
		bo = bo->proxy;
	assert(bo->refcnt);
	assert(!bo->purged);

	if (bo->domain != DOMAIN_CPU || FORCE_MMAP_SYNC & (1 << DOMAIN_CPU)) {
		struct drm_i915_gem_set_domain set_domain;

		DBG(("%s: SYNC: handle=%d, needs_flush? %d, domain? %d, busy? %d\n",
		     __FUNCTION__, bo->handle,
		     bo->needs_flush, bo->domain,
		     __kgem_busy(kgem, bo->handle)));

		VG_CLEAR(set_domain);
		set_domain.handle = bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_CPU;
		set_domain.write_domain = write ? I915_GEM_DOMAIN_CPU : 0;

		if (do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
			/* Treat set-domain failure as a GPU hang. */
			DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
			kgem_throttle(kgem);
		}
		if (write) {
			kgem_bo_retire(kgem, bo);
			bo->domain = DOMAIN_CPU;
		} else {
			/* Read-only: the bo may still be active elsewhere. */
			if (bo->exec == NULL)
				kgem_bo_maybe_retire(kgem, bo);
			bo->domain = DOMAIN_NONE;
		}
	}
}
654503b705cfSriastradh
/* Synchronise bo for read/write access through a GTT mapping: flush any
 * pending batch, then move the bo into the GTT domain. */
void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
	assert(bo->refcnt);
	assert(bo->proxy == NULL);
	assert_tiling(kgem, bo);

	kgem_bo_submit(kgem, bo);

	if (bo->domain != DOMAIN_GTT || FORCE_MMAP_SYNC & (1 << DOMAIN_GTT)) {
		struct drm_i915_gem_set_domain set_domain;

		DBG(("%s: SYNC: handle=%d, needs_flush? %d, domain? %d, busy? %d\n",
		     __FUNCTION__, bo->handle,
		     bo->needs_flush, bo->domain,
		     __kgem_busy(kgem, bo->handle)));

		VG_CLEAR(set_domain);
		set_domain.handle = bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;

		if (do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
			/* Treat set-domain failure as a GPU hang. */
			DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
			kgem_throttle(kgem);
		}
		kgem_bo_retire(kgem, bo);
		bo->domain = DOMAIN_GTT;
		bo->gtt_dirty = true;
	}
}
657703b705cfSriastradh
/* Clear the gpu_dirty flag on buffers attached to the next request.
 * NOTE(review): the loop breaks at the first clean bo — this presumes
 * dirty bos are kept at the head of the request's buffer list; verify
 * against where buffers are inserted before changing this. */
void kgem_clear_dirty(struct kgem *kgem)
{
	struct list * const buffers = &kgem->next_request->buffers;
	struct kgem_bo *bo;

	list_for_each_entry(bo, buffers, request) {
		if (!bo->gpu_dirty)
			break;

		bo->gpu_dirty = false;
	}
}
659003b705cfSriastradh
/* Create a proxy bo: a lightweight view of [offset, offset+length) within
 * target, sharing target's GEM handle.  The proxy holds a reference on
 * target and is never reusable in the caches.  Returns NULL on OOM. */
struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
				  struct kgem_bo *target,
				  int offset, int length)
{
	struct kgem_bo *bo;

	DBG(("%s: target handle=%d [proxy? %d], offset=%d, length=%d, io=%d\n",
	     __FUNCTION__, target->handle, target->proxy ? target->proxy->delta : -1,
	     offset, length, target->io));

	/* Shares target's handle; the proxy is not a new kernel object. */
	bo = __kgem_bo_alloc(target->handle, length);
	if (bo == NULL)
		return NULL;

	bo->unique_id = kgem_get_unique_id(kgem);
	bo->reusable = false;
	bo->size.bytes = length;

	/* Inherit the target's layout/caching properties. */
	bo->io = target->io && target->proxy == NULL;
	bo->gpu_dirty = target->gpu_dirty;
	bo->tiling = target->tiling;
	bo->pitch = target->pitch;
	bo->flush = target->flush;
	bo->snoop = target->snoop;

	assert(!bo->scanout);
	bo->proxy = kgem_bo_reference(target);
	bo->delta = offset;

	/* Proxies are only tracked for busyness on the current rq */
	if (target->exec && !bo->io) {
		assert(RQ(target->rq) == kgem->next_request);
		list_move_tail(&bo->request, &kgem->next_request->buffers);
		/* Dummy exec entry marks the proxy as "in the batch". */
		bo->exec = &_kgem_dummy_exec;
		bo->rq = target->rq;
	}

	return bo;
}
663003b705cfSriastradh
663103b705cfSriastradhstatic struct kgem_buffer *
663203b705cfSriastradhbuffer_alloc(void)
663303b705cfSriastradh{
663403b705cfSriastradh	struct kgem_buffer *bo;
663503b705cfSriastradh
663603b705cfSriastradh	bo = malloc(sizeof(*bo));
663703b705cfSriastradh	if (bo == NULL)
663803b705cfSriastradh		return NULL;
663903b705cfSriastradh
664003b705cfSriastradh	bo->mem = NULL;
664103b705cfSriastradh	bo->need_io = false;
66429a906b70Schristos	bo->mmapped = MMAPPED_CPU;
664303b705cfSriastradh
664403b705cfSriastradh	return bo;
664503b705cfSriastradh}
664603b705cfSriastradh
664703b705cfSriastradhstatic struct kgem_buffer *
664803b705cfSriastradhbuffer_alloc_with_data(int num_pages)
664903b705cfSriastradh{
665003b705cfSriastradh	struct kgem_buffer *bo;
665103b705cfSriastradh
665203b705cfSriastradh	bo = malloc(sizeof(*bo) + 2*UPLOAD_ALIGNMENT + num_pages * PAGE_SIZE);
665303b705cfSriastradh	if (bo == NULL)
665403b705cfSriastradh		return NULL;
665503b705cfSriastradh
665603b705cfSriastradh	bo->mem = (void *)ALIGN((uintptr_t)bo + sizeof(*bo), UPLOAD_ALIGNMENT);
665703b705cfSriastradh	bo->mmapped = false;
665803b705cfSriastradh	return bo;
665903b705cfSriastradh}
666003b705cfSriastradh
666103b705cfSriastradhstatic inline bool
666203b705cfSriastradhuse_snoopable_buffer(struct kgem *kgem, uint32_t flags)
666303b705cfSriastradh{
666403b705cfSriastradh	if ((flags & KGEM_BUFFER_WRITE) == 0)
666503b705cfSriastradh		return kgem->gen >= 030;
666603b705cfSriastradh
666703b705cfSriastradh	return true;
666803b705cfSriastradh}
666903b705cfSriastradh
/* Adopt an existing linear bo as the base of a kgem_buffer: the bo's
 * fields are copied wholesale into bo->base and the original allocation
 * is freed.  The embedded list nodes must be re-linked afterwards since
 * memcpy leaves them pointing at the old struct's addresses. */
static void
init_buffer_from_bo(struct kgem_buffer *bo, struct kgem_bo *old)
{
	DBG(("%s: reusing handle=%d for buffer\n",
	     __FUNCTION__, old->handle));

	assert(old->proxy == NULL);
	assert(list_is_empty(&old->list));

	memcpy(&bo->base, old, sizeof(*old));
	/* Only splice into the request list if old was actually queued. */
	if (old->rq)
		list_replace(&old->request, &bo->base.request);
	else
		list_init(&bo->base.request);
	list_replace(&old->vma, &bo->base.vma);
	list_init(&bo->base.list);
	/* Ownership fully transferred to bo->base; old must not be used. */
	free(old);

	assert(bo->base.tiling == I915_TILING_NONE);

	bo->base.refcnt = 1;
}
669203b705cfSriastradh
/* Try to satisfy a snoopable-buffer request from the snoop cache.
 * Returns a mapped kgem_buffer with refcnt 1, or NULL if the cache has
 * nothing suitable (or mapping the recycled bo fails). */
static struct kgem_buffer *
search_snoopable_buffer(struct kgem *kgem, unsigned alloc)
{
	struct kgem_buffer *bo;
	struct kgem_bo *old;

	old = search_snoop_cache(kgem, alloc, 0);
	if (old) {
		if (!old->io) {
			/* Plain bo: wrap it in a fresh kgem_buffer shell. */
			bo = buffer_alloc();
			if (bo == NULL)
				return NULL;

			init_buffer_from_bo(bo, old);
		} else {
			/* io bos were allocated as kgem_buffer already. */
			bo = (struct kgem_buffer *)old;
			bo->base.refcnt = 1;
		}

		DBG(("%s: created CPU handle=%d for buffer, size %d\n",
		     __FUNCTION__, bo->base.handle, num_pages(&bo->base)));

		assert(bo->base.snoop);
		assert(bo->base.tiling == I915_TILING_NONE);
		assert(num_pages(&bo->base) >= alloc);
		assert(bo->mmapped == MMAPPED_CPU);
		assert(bo->need_io == false);

		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
		if (bo->mem == NULL) {
			/* Mapping failed: drop the refcnt so the free below
			 * does not trip the debug accounting, and bail. */
			bo->base.refcnt = 0;
			kgem_bo_free(kgem, &bo->base);
			bo = NULL;
		}

		return bo;
	}

	return NULL;
}
673303b705cfSriastradh
/* Create a CPU-coherent (snoopable) upload buffer of `alloc` pages,
 * trying three strategies in order of preference:
 *   1. LLC hardware: any bo is coherent, reuse/create a linear bo.
 *   2. set-caching ioctl: mark a fresh/recycled bo as SNOOPED.
 *   3. userptr: wrap malloc'd memory as a GEM object.
 * Returns a mapped kgem_buffer with refcnt 1, or NULL on failure. */
static struct kgem_buffer *
create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
{
	struct kgem_buffer *bo;
	uint32_t handle;

	if (kgem->has_llc) {
		struct kgem_bo *old;

		bo = buffer_alloc();
		if (bo == NULL)
			return NULL;

		/* Prefer recycling an inactive CPU-mappable bo of the exact size. */
		old = search_linear_cache(kgem, alloc,
					 CREATE_INACTIVE | CREATE_CPU_MAP | CREATE_EXACT);
		if (old) {
			init_buffer_from_bo(bo, old);
		} else {
			handle = gem_create(kgem->fd, alloc);
			if (handle == 0) {
				free(bo);
				return NULL;
			}

			__kgem_bo_init(&bo->base, handle, alloc);
			debug_alloc__bo(kgem, &bo->base);
			DBG(("%s: created CPU (LLC) handle=%d for buffer, size %d\n",
			     __FUNCTION__, bo->base.handle, alloc));
		}

		assert(bo->base.refcnt == 1);
		assert(bo->mmapped == MMAPPED_CPU);
		assert(bo->need_io == false);

		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
		if (bo->mem != NULL)
			return bo;

		bo->base.refcnt = 0; /* for valgrind */
		kgem_bo_free(kgem, &bo->base);
	}

	if (kgem->has_caching) {
		struct kgem_bo *old;

		bo = buffer_alloc();
		if (bo == NULL)
			return NULL;

		old = search_linear_cache(kgem, alloc,
					 CREATE_INACTIVE | CREATE_CPU_MAP | CREATE_EXACT);
		if (old) {
			init_buffer_from_bo(bo, old);
		} else {
			handle = gem_create(kgem->fd, alloc);
			if (handle == 0) {
				free(bo);
				return NULL;
			}

			__kgem_bo_init(&bo->base, handle, alloc);
			debug_alloc__bo(kgem, &bo->base);
			DBG(("%s: created CPU handle=%d for buffer, size %d\n",
			     __FUNCTION__, bo->base.handle, alloc));
		}

		assert(bo->base.refcnt == 1);
		assert(bo->mmapped == MMAPPED_CPU);
		assert(bo->need_io == false);
		assert(!__kgem_busy(kgem, bo->base.handle));

		/* Ask the kernel to keep this bo coherent with the CPU cache. */
		if (!gem_set_caching(kgem->fd, bo->base.handle, SNOOPED))
			goto free_caching;

		bo->base.snoop = true;

		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
		if (bo->mem == NULL)
			goto free_caching;

		return bo;

free_caching:
		bo->base.refcnt = 0; /* for valgrind */
		kgem_bo_free(kgem, &bo->base);
	}

	if (kgem->has_userptr) {
		bo = buffer_alloc();
		if (bo == NULL)
			return NULL;

		//if (posix_memalign(&ptr, 64, ALIGN(size, 64)))
		/* userptr requires page-aligned memory of whole pages. */
		if (posix_memalign(&bo->mem, PAGE_SIZE, alloc * PAGE_SIZE)) {
			free(bo);
			return NULL;
		}

		handle = gem_userptr(kgem->fd, bo->mem, alloc * PAGE_SIZE, false);
		if (handle == 0) {
			free(bo->mem);
			free(bo);
			return NULL;
		}

		__kgem_bo_init(&bo->base, handle, alloc);
		debug_alloc__bo(kgem, &bo->base);
		DBG(("%s: created snoop handle=%d for buffer\n",
		     __FUNCTION__, bo->base.handle));

		assert(bo->mmapped == MMAPPED_CPU);
		assert(bo->need_io == false);

		bo->base.refcnt = 1;
		bo->base.snoop = true;
		/* The CPU "map" is the user allocation itself; tag it so it
		 * is never munmapped by the vma cache. */
		bo->base.map__cpu = MAKE_USER_MAP(bo->mem);

		return bo;
	}

	return NULL;
}
685603b705cfSriastradh
685703b705cfSriastradhstruct kgem_bo *kgem_create_buffer(struct kgem *kgem,
685803b705cfSriastradh				   uint32_t size, uint32_t flags,
685903b705cfSriastradh				   void **ret)
686003b705cfSriastradh{
686103b705cfSriastradh	struct kgem_buffer *bo;
686203b705cfSriastradh	unsigned offset, alloc;
686303b705cfSriastradh	struct kgem_bo *old;
686403b705cfSriastradh
686503b705cfSriastradh	DBG(("%s: size=%d, flags=%x [write?=%d, inplace?=%d, last?=%d]\n",
686603b705cfSriastradh	     __FUNCTION__, size, flags,
686703b705cfSriastradh	     !!(flags & KGEM_BUFFER_WRITE),
686803b705cfSriastradh	     !!(flags & KGEM_BUFFER_INPLACE),
686903b705cfSriastradh	     !!(flags & KGEM_BUFFER_LAST)));
687003b705cfSriastradh	assert(size);
687103b705cfSriastradh	/* we should never be asked to create anything TOO large */
687203b705cfSriastradh	assert(size <= kgem->max_object_size);
687303b705cfSriastradh
687403b705cfSriastradh#if !DBG_NO_UPLOAD_CACHE
687503b705cfSriastradh	list_for_each_entry(bo, &kgem->batch_buffers, base.list) {
687603b705cfSriastradh		assert(bo->base.io);
687703b705cfSriastradh		assert(bo->base.refcnt >= 1);
687803b705cfSriastradh
687903b705cfSriastradh		/* We can reuse any write buffer which we can fit */
688003b705cfSriastradh		if (flags == KGEM_BUFFER_LAST &&
688103b705cfSriastradh		    bo->write == KGEM_BUFFER_WRITE &&
68829a906b70Schristos		    bo->base.refcnt == 1 &&
68839a906b70Schristos		    bo->mmapped == MMAPPED_NONE &&
688403b705cfSriastradh		    size <= bytes(&bo->base)) {
688503b705cfSriastradh			DBG(("%s: reusing write buffer for read of %d bytes? used=%d, total=%d\n",
688603b705cfSriastradh			     __FUNCTION__, size, bo->used, bytes(&bo->base)));
68879a906b70Schristos			gem_write__cachealigned(kgem->fd, bo->base.handle,
68889a906b70Schristos						0, bo->used, bo->mem);
68899a906b70Schristos			assert(list_is_empty(&bo->base.vma));
689003b705cfSriastradh			bo->need_io = 0;
689103b705cfSriastradh			bo->write = 0;
689203b705cfSriastradh			offset = 0;
689303b705cfSriastradh			bo->used = size;
689403b705cfSriastradh			goto done;
689503b705cfSriastradh		}
689603b705cfSriastradh
689703b705cfSriastradh		if (flags & KGEM_BUFFER_WRITE) {
689803b705cfSriastradh			if ((bo->write & KGEM_BUFFER_WRITE) == 0 ||
689903b705cfSriastradh			    (((bo->write & ~flags) & KGEM_BUFFER_INPLACE) &&
690003b705cfSriastradh			     !bo->base.snoop)) {
690103b705cfSriastradh				DBG(("%s: skip write %x buffer, need %x\n",
690203b705cfSriastradh				     __FUNCTION__, bo->write, flags));
690303b705cfSriastradh				continue;
690403b705cfSriastradh			}
690503b705cfSriastradh			assert(bo->mmapped || bo->need_io);
690603b705cfSriastradh		} else {
690703b705cfSriastradh			if (bo->write & KGEM_BUFFER_WRITE) {
690803b705cfSriastradh				DBG(("%s: skip write %x buffer, need %x\n",
690903b705cfSriastradh				     __FUNCTION__, bo->write, flags));
691003b705cfSriastradh				continue;
691103b705cfSriastradh			}
691203b705cfSriastradh		}
691303b705cfSriastradh
691403b705cfSriastradh		if (bo->used + size <= bytes(&bo->base)) {
691503b705cfSriastradh			DBG(("%s: reusing buffer? used=%d + size=%d, total=%d\n",
691603b705cfSriastradh			     __FUNCTION__, bo->used, size, bytes(&bo->base)));
691703b705cfSriastradh			offset = bo->used;
691803b705cfSriastradh			bo->used += size;
691903b705cfSriastradh			goto done;
692003b705cfSriastradh		}
692103b705cfSriastradh	}
692203b705cfSriastradh
692303b705cfSriastradh	if (flags & KGEM_BUFFER_WRITE) {
692403b705cfSriastradh		list_for_each_entry(bo, &kgem->active_buffers, base.list) {
692503b705cfSriastradh			assert(bo->base.io);
692603b705cfSriastradh			assert(bo->base.refcnt >= 1);
69279a906b70Schristos			assert(bo->base.exec == NULL);
692803b705cfSriastradh			assert(bo->mmapped);
69299a906b70Schristos			assert(bo->mmapped == MMAPPED_GTT || kgem->has_llc || bo->base.snoop);
693003b705cfSriastradh
69319a906b70Schristos			if ((bo->write & ~flags) & KGEM_BUFFER_INPLACE && !bo->base.snoop) {
693203b705cfSriastradh				DBG(("%s: skip write %x buffer, need %x\n",
693303b705cfSriastradh				     __FUNCTION__, bo->write, flags));
693403b705cfSriastradh				continue;
693503b705cfSriastradh			}
693603b705cfSriastradh
693703b705cfSriastradh			if (bo->used + size <= bytes(&bo->base)) {
693803b705cfSriastradh				DBG(("%s: reusing buffer? used=%d + size=%d, total=%d\n",
693903b705cfSriastradh				     __FUNCTION__, bo->used, size, bytes(&bo->base)));
694003b705cfSriastradh				offset = bo->used;
694103b705cfSriastradh				bo->used += size;
694203b705cfSriastradh				list_move(&bo->base.list, &kgem->batch_buffers);
694303b705cfSriastradh				goto done;
694403b705cfSriastradh			}
69459a906b70Schristos
69469a906b70Schristos			if (bo->base.refcnt == 1 &&
69479a906b70Schristos			    size <= bytes(&bo->base) &&
69489a906b70Schristos			    (bo->base.rq == NULL ||
69499a906b70Schristos			     !__kgem_busy(kgem, bo->base.handle))) {
69509a906b70Schristos				DBG(("%s: reusing whole buffer? size=%d, total=%d\n",
69519a906b70Schristos				     __FUNCTION__, size, bytes(&bo->base)));
69529a906b70Schristos				__kgem_bo_clear_busy(&bo->base);
69539a906b70Schristos				assert(list_is_empty(&bo->base.vma));
69549a906b70Schristos
69559a906b70Schristos				switch (bo->mmapped) {
69569a906b70Schristos				case MMAPPED_CPU:
69579a906b70Schristos					kgem_bo_sync__cpu(kgem, &bo->base);
69589a906b70Schristos					break;
69599a906b70Schristos				case MMAPPED_GTT:
69609a906b70Schristos					kgem_bo_sync__gtt(kgem, &bo->base);
69619a906b70Schristos					break;
69629a906b70Schristos				}
69639a906b70Schristos
69649a906b70Schristos				offset = 0;
69659a906b70Schristos				bo->used = size;
69669a906b70Schristos				list_move(&bo->base.list, &kgem->batch_buffers);
69679a906b70Schristos				goto done;
69689a906b70Schristos			}
696903b705cfSriastradh		}
697003b705cfSriastradh	}
697103b705cfSriastradh#endif
697203b705cfSriastradh
697303b705cfSriastradh#if !DBG_NO_MAP_UPLOAD
697403b705cfSriastradh	/* Be a little more generous and hope to hold fewer mmappings */
697503b705cfSriastradh	alloc = ALIGN(2*size, kgem->buffer_size);
697603b705cfSriastradh	if (alloc > MAX_CACHE_SIZE)
697703b705cfSriastradh		alloc = ALIGN(size, kgem->buffer_size);
697803b705cfSriastradh	if (alloc > MAX_CACHE_SIZE)
697903b705cfSriastradh		alloc = PAGE_ALIGN(size);
698003b705cfSriastradh	assert(alloc);
698103b705cfSriastradh
69829a906b70Schristos	alloc /= PAGE_SIZE;
6983813957e3Ssnj	if (alloc > kgem->aperture_mappable / 4 && !kgem->has_wc_mmap)
698403b705cfSriastradh		flags &= ~KGEM_BUFFER_INPLACE;
698503b705cfSriastradh
698603b705cfSriastradh	if (kgem->has_llc &&
698703b705cfSriastradh	    (flags & KGEM_BUFFER_WRITE_INPLACE) != KGEM_BUFFER_WRITE_INPLACE) {
698803b705cfSriastradh		bo = buffer_alloc();
698903b705cfSriastradh		if (bo == NULL)
699003b705cfSriastradh			goto skip_llc;
699103b705cfSriastradh
699203b705cfSriastradh		old = NULL;
699303b705cfSriastradh		if ((flags & KGEM_BUFFER_WRITE) == 0)
699403b705cfSriastradh			old = search_linear_cache(kgem, alloc, CREATE_CPU_MAP);
699503b705cfSriastradh		if (old == NULL)
699603b705cfSriastradh			old = search_linear_cache(kgem, alloc, CREATE_INACTIVE | CREATE_CPU_MAP);
699703b705cfSriastradh		if (old == NULL)
699803b705cfSriastradh			old = search_linear_cache(kgem, NUM_PAGES(size), CREATE_INACTIVE | CREATE_CPU_MAP);
699903b705cfSriastradh		if (old) {
700003b705cfSriastradh			DBG(("%s: found LLC handle=%d for buffer\n",
700103b705cfSriastradh			     __FUNCTION__, old->handle));
700203b705cfSriastradh
700303b705cfSriastradh			init_buffer_from_bo(bo, old);
700403b705cfSriastradh		} else {
700503b705cfSriastradh			uint32_t handle = gem_create(kgem->fd, alloc);
700603b705cfSriastradh			if (handle == 0) {
700703b705cfSriastradh				free(bo);
700803b705cfSriastradh				goto skip_llc;
700903b705cfSriastradh			}
701003b705cfSriastradh			__kgem_bo_init(&bo->base, handle, alloc);
70119a906b70Schristos			debug_alloc__bo(kgem, &bo->base);
701203b705cfSriastradh			DBG(("%s: created LLC handle=%d for buffer\n",
701303b705cfSriastradh			     __FUNCTION__, bo->base.handle));
701403b705cfSriastradh		}
701503b705cfSriastradh
701603b705cfSriastradh		assert(bo->mmapped);
701703b705cfSriastradh		assert(!bo->need_io);
701803b705cfSriastradh
701903b705cfSriastradh		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
702003b705cfSriastradh		if (bo->mem) {
702103b705cfSriastradh			if (flags & KGEM_BUFFER_WRITE)
702203b705cfSriastradh				kgem_bo_sync__cpu(kgem, &bo->base);
702303b705cfSriastradh			flags &= ~KGEM_BUFFER_INPLACE;
702403b705cfSriastradh			goto init;
702503b705cfSriastradh		} else {
702603b705cfSriastradh			bo->base.refcnt = 0; /* for valgrind */
702703b705cfSriastradh			kgem_bo_free(kgem, &bo->base);
702803b705cfSriastradh		}
702903b705cfSriastradh	}
703003b705cfSriastradhskip_llc:
703103b705cfSriastradh
703203b705cfSriastradh	if ((flags & KGEM_BUFFER_WRITE_INPLACE) == KGEM_BUFFER_WRITE_INPLACE) {
703303b705cfSriastradh		/* The issue with using a GTT upload buffer is that we may
703403b705cfSriastradh		 * cause eviction-stalls in order to free up some GTT space.
703503b705cfSriastradh		 * An is-mappable? ioctl could help us detect when we are
703603b705cfSriastradh		 * about to block, or some per-page magic in the kernel.
703703b705cfSriastradh		 *
703803b705cfSriastradh		 * XXX This is especially noticeable on memory constrained
703903b705cfSriastradh		 * devices like gen2 or with relatively slow gpu like i3.
704003b705cfSriastradh		 */
704103b705cfSriastradh		DBG(("%s: searching for an inactive GTT map for upload\n",
704203b705cfSriastradh		     __FUNCTION__));
704303b705cfSriastradh		old = search_linear_cache(kgem, alloc,
704403b705cfSriastradh					  CREATE_EXACT | CREATE_INACTIVE | CREATE_GTT_MAP);
704503b705cfSriastradh#if HAVE_I915_GEM_BUFFER_INFO
704603b705cfSriastradh		if (old) {
704703b705cfSriastradh			struct drm_i915_gem_buffer_info info;
704803b705cfSriastradh
704903b705cfSriastradh			/* An example of such a non-blocking ioctl might work */
705003b705cfSriastradh
705103b705cfSriastradh			VG_CLEAR(info);
705203b705cfSriastradh			info.handle = handle;
70539a906b70Schristos			if (do_ioctl(kgem->fd,
705403b705cfSriastradh				     DRM_IOCTL_I915_GEM_BUFFER_INFO,
705503b705cfSriastradh				     &fino) == 0) {
705603b705cfSriastradh				old->presumed_offset = info.addr;
705703b705cfSriastradh				if ((info.flags & I915_GEM_MAPPABLE) == 0) {
705803b705cfSriastradh					kgem_bo_move_to_inactive(kgem, old);
705903b705cfSriastradh					old = NULL;
706003b705cfSriastradh				}
706103b705cfSriastradh			}
706203b705cfSriastradh		}
706303b705cfSriastradh#endif
706403b705cfSriastradh		if (old == NULL)
706503b705cfSriastradh			old = search_linear_cache(kgem, NUM_PAGES(size),
706603b705cfSriastradh						  CREATE_EXACT | CREATE_INACTIVE | CREATE_GTT_MAP);
706703b705cfSriastradh		if (old == NULL) {
706803b705cfSriastradh			old = search_linear_cache(kgem, alloc, CREATE_INACTIVE);
70699a906b70Schristos			if (old && !kgem_bo_can_map(kgem, old)) {
707003b705cfSriastradh				_kgem_bo_destroy(kgem, old);
707103b705cfSriastradh				old = NULL;
707203b705cfSriastradh			}
707303b705cfSriastradh		}
707403b705cfSriastradh		if (old) {
707503b705cfSriastradh			DBG(("%s: reusing handle=%d for buffer\n",
707603b705cfSriastradh			     __FUNCTION__, old->handle));
70779a906b70Schristos			assert(kgem_bo_can_map(kgem, old));
707803b705cfSriastradh			assert(!old->snoop);
707903b705cfSriastradh			assert(old->rq == NULL);
708003b705cfSriastradh
708103b705cfSriastradh			bo = buffer_alloc();
708203b705cfSriastradh			if (bo == NULL)
708303b705cfSriastradh				return NULL;
708403b705cfSriastradh
708503b705cfSriastradh			init_buffer_from_bo(bo, old);
708603b705cfSriastradh			assert(num_pages(&bo->base) >= NUM_PAGES(size));
708703b705cfSriastradh
708803b705cfSriastradh			assert(bo->mmapped);
708903b705cfSriastradh			assert(bo->base.refcnt == 1);
709003b705cfSriastradh
709103b705cfSriastradh			bo->mem = kgem_bo_map(kgem, &bo->base);
709203b705cfSriastradh			if (bo->mem) {
70939a906b70Schristos				if (bo->mem == MAP(bo->base.map__cpu))
709403b705cfSriastradh					flags &= ~KGEM_BUFFER_INPLACE;
70959a906b70Schristos				else
70969a906b70Schristos					bo->mmapped = MMAPPED_GTT;
709703b705cfSriastradh				goto init;
709803b705cfSriastradh			} else {
709903b705cfSriastradh				bo->base.refcnt = 0;
710003b705cfSriastradh				kgem_bo_free(kgem, &bo->base);
710103b705cfSriastradh			}
710203b705cfSriastradh		}
710303b705cfSriastradh	}
710403b705cfSriastradh#else
710503b705cfSriastradh	flags &= ~KGEM_BUFFER_INPLACE;
710603b705cfSriastradh#endif
710703b705cfSriastradh	/* Be more parsimonious with pwrite/pread/cacheable buffers */
710803b705cfSriastradh	if ((flags & KGEM_BUFFER_INPLACE) == 0)
710903b705cfSriastradh		alloc = NUM_PAGES(size);
711003b705cfSriastradh
711103b705cfSriastradh	if (use_snoopable_buffer(kgem, flags)) {
711203b705cfSriastradh		bo = search_snoopable_buffer(kgem, alloc);
711303b705cfSriastradh		if (bo) {
711403b705cfSriastradh			if (flags & KGEM_BUFFER_WRITE)
711503b705cfSriastradh				kgem_bo_sync__cpu(kgem, &bo->base);
711603b705cfSriastradh			flags &= ~KGEM_BUFFER_INPLACE;
711703b705cfSriastradh			goto init;
711803b705cfSriastradh		}
711903b705cfSriastradh
712003b705cfSriastradh		if ((flags & KGEM_BUFFER_INPLACE) == 0) {
712103b705cfSriastradh			bo = create_snoopable_buffer(kgem, alloc);
712203b705cfSriastradh			if (bo)
712303b705cfSriastradh				goto init;
712403b705cfSriastradh		}
712503b705cfSriastradh	}
712603b705cfSriastradh
712703b705cfSriastradh	flags &= ~KGEM_BUFFER_INPLACE;
712803b705cfSriastradh
712903b705cfSriastradh	old = NULL;
713003b705cfSriastradh	if ((flags & KGEM_BUFFER_WRITE) == 0)
713103b705cfSriastradh		old = search_linear_cache(kgem, alloc, 0);
713203b705cfSriastradh	if (old == NULL)
713303b705cfSriastradh		old = search_linear_cache(kgem, alloc, CREATE_INACTIVE);
713403b705cfSriastradh	if (old) {
713503b705cfSriastradh		DBG(("%s: reusing ordinary handle %d for io\n",
713603b705cfSriastradh		     __FUNCTION__, old->handle));
713703b705cfSriastradh		bo = buffer_alloc_with_data(num_pages(old));
713803b705cfSriastradh		if (bo == NULL)
713903b705cfSriastradh			return NULL;
714003b705cfSriastradh
714103b705cfSriastradh		init_buffer_from_bo(bo, old);
714203b705cfSriastradh		bo->need_io = flags & KGEM_BUFFER_WRITE;
714303b705cfSriastradh	} else {
714403b705cfSriastradh		unsigned hint;
714503b705cfSriastradh
714603b705cfSriastradh		if (use_snoopable_buffer(kgem, flags)) {
714703b705cfSriastradh			bo = create_snoopable_buffer(kgem, alloc);
714803b705cfSriastradh			if (bo)
714903b705cfSriastradh				goto init;
715003b705cfSriastradh		}
715103b705cfSriastradh
715203b705cfSriastradh		bo = buffer_alloc();
715303b705cfSriastradh		if (bo == NULL)
715403b705cfSriastradh			return NULL;
715503b705cfSriastradh
715603b705cfSriastradh		hint = CREATE_INACTIVE;
715703b705cfSriastradh		if (flags & KGEM_BUFFER_WRITE)
715803b705cfSriastradh			hint |= CREATE_CPU_MAP;
715903b705cfSriastradh		old = search_linear_cache(kgem, alloc, hint);
716003b705cfSriastradh		if (old) {
716103b705cfSriastradh			DBG(("%s: reusing handle=%d for buffer\n",
716203b705cfSriastradh			     __FUNCTION__, old->handle));
716303b705cfSriastradh
716403b705cfSriastradh			init_buffer_from_bo(bo, old);
716503b705cfSriastradh		} else {
716603b705cfSriastradh			uint32_t handle = gem_create(kgem->fd, alloc);
716703b705cfSriastradh			if (handle == 0) {
716803b705cfSriastradh				free(bo);
716903b705cfSriastradh				return NULL;
717003b705cfSriastradh			}
717103b705cfSriastradh
717203b705cfSriastradh			DBG(("%s: created handle=%d for buffer\n",
717303b705cfSriastradh			     __FUNCTION__, handle));
717403b705cfSriastradh
717503b705cfSriastradh			__kgem_bo_init(&bo->base, handle, alloc);
71769a906b70Schristos			debug_alloc__bo(kgem, &bo->base);
717703b705cfSriastradh		}
717803b705cfSriastradh
717903b705cfSriastradh		assert(bo->mmapped);
718003b705cfSriastradh		assert(!bo->need_io);
718103b705cfSriastradh		assert(bo->base.refcnt == 1);
718203b705cfSriastradh
718303b705cfSriastradh		if (flags & KGEM_BUFFER_WRITE) {
718403b705cfSriastradh			bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
718503b705cfSriastradh			if (bo->mem != NULL) {
718603b705cfSriastradh				kgem_bo_sync__cpu(kgem, &bo->base);
718703b705cfSriastradh				goto init;
718803b705cfSriastradh			}
718903b705cfSriastradh		}
719003b705cfSriastradh
719103b705cfSriastradh		DBG(("%s: failing back to new pwrite buffer\n", __FUNCTION__));
719203b705cfSriastradh		old = &bo->base;
719303b705cfSriastradh		bo = buffer_alloc_with_data(num_pages(old));
719403b705cfSriastradh		if (bo == NULL) {
719503b705cfSriastradh			old->refcnt= 0;
719603b705cfSriastradh			kgem_bo_free(kgem, old);
719703b705cfSriastradh			return NULL;
719803b705cfSriastradh		}
719903b705cfSriastradh
720003b705cfSriastradh		init_buffer_from_bo(bo, old);
720103b705cfSriastradh
720203b705cfSriastradh		assert(bo->mem);
720303b705cfSriastradh		assert(!bo->mmapped);
720403b705cfSriastradh		assert(bo->base.refcnt == 1);
720503b705cfSriastradh
720603b705cfSriastradh		bo->need_io = flags & KGEM_BUFFER_WRITE;
720703b705cfSriastradh	}
720803b705cfSriastradhinit:
720903b705cfSriastradh	bo->base.io = true;
721003b705cfSriastradh	assert(bo->base.refcnt == 1);
721103b705cfSriastradh	assert(num_pages(&bo->base) >= NUM_PAGES(size));
721203b705cfSriastradh	assert(!bo->need_io || !bo->base.needs_flush);
721303b705cfSriastradh	assert(!bo->need_io || bo->base.domain != DOMAIN_GPU);
721403b705cfSriastradh	assert(bo->mem);
7215813957e3Ssnj	assert(bo->mmapped != MMAPPED_GTT || bo->base.map__gtt == bo->mem || bo->base.map__wc == bo->mem);
72169a906b70Schristos	assert(bo->mmapped != MMAPPED_CPU || MAP(bo->base.map__cpu) == bo->mem);
721703b705cfSriastradh
721803b705cfSriastradh	bo->used = size;
721903b705cfSriastradh	bo->write = flags & KGEM_BUFFER_WRITE_INPLACE;
722003b705cfSriastradh	offset = 0;
722103b705cfSriastradh
722203b705cfSriastradh	assert(list_is_empty(&bo->base.list));
722303b705cfSriastradh	list_add(&bo->base.list, &kgem->batch_buffers);
722403b705cfSriastradh
722503b705cfSriastradh	DBG(("%s(pages=%d [%d]) new handle=%d, used=%d, write=%d\n",
722603b705cfSriastradh	     __FUNCTION__, num_pages(&bo->base), alloc, bo->base.handle, bo->used, bo->write));
722703b705cfSriastradh
722803b705cfSriastradhdone:
722903b705cfSriastradh	bo->used = ALIGN(bo->used, UPLOAD_ALIGNMENT);
72309a906b70Schristos	assert(bo->used && bo->used <= bytes(&bo->base));
723103b705cfSriastradh	assert(bo->mem);
723203b705cfSriastradh	*ret = (char *)bo->mem + offset;
723303b705cfSriastradh	return kgem_create_proxy(kgem, &bo->base, offset, size);
723403b705cfSriastradh}
723503b705cfSriastradh
723603b705cfSriastradhbool kgem_buffer_is_inplace(struct kgem_bo *_bo)
723703b705cfSriastradh{
723803b705cfSriastradh	struct kgem_buffer *bo = (struct kgem_buffer *)_bo->proxy;
723903b705cfSriastradh	return bo->write & KGEM_BUFFER_WRITE_INPLACE;
724003b705cfSriastradh}
724103b705cfSriastradh
724203b705cfSriastradhstruct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
724303b705cfSriastradh				      int width, int height, int bpp,
724403b705cfSriastradh				      uint32_t flags,
724503b705cfSriastradh				      void **ret)
724603b705cfSriastradh{
724703b705cfSriastradh	struct kgem_bo *bo;
724803b705cfSriastradh	int stride;
724903b705cfSriastradh
725003b705cfSriastradh	assert(width > 0 && height > 0);
725103b705cfSriastradh	assert(ret != NULL);
725203b705cfSriastradh	stride = ALIGN(width, 2) * bpp >> 3;
7253813957e3Ssnj	stride = ALIGN(stride, kgem->gen >= 0100 ? 32 : 4);
725403b705cfSriastradh
725503b705cfSriastradh	DBG(("%s: %dx%d, %d bpp, stride=%d\n",
725603b705cfSriastradh	     __FUNCTION__, width, height, bpp, stride));
725703b705cfSriastradh
725803b705cfSriastradh	bo = kgem_create_buffer(kgem, stride * ALIGN(height, 2), flags, ret);
725903b705cfSriastradh	if (bo == NULL) {
726003b705cfSriastradh		DBG(("%s: allocation failure for upload buffer\n",
726103b705cfSriastradh		     __FUNCTION__));
726203b705cfSriastradh		return NULL;
726303b705cfSriastradh	}
726403b705cfSriastradh	assert(*ret != NULL);
726503b705cfSriastradh	assert(bo->proxy != NULL);
726603b705cfSriastradh
726703b705cfSriastradh	if (height & 1) {
726803b705cfSriastradh		struct kgem_buffer *io = (struct kgem_buffer *)bo->proxy;
726903b705cfSriastradh		int min;
727003b705cfSriastradh
727103b705cfSriastradh		assert(io->used);
727203b705cfSriastradh
727303b705cfSriastradh		/* Having padded this surface to ensure that accesses to
727403b705cfSriastradh		 * the last pair of rows is valid, remove the padding so
727503b705cfSriastradh		 * that it can be allocated to other pixmaps.
727603b705cfSriastradh		 */
727703b705cfSriastradh		min = bo->delta + height * stride;
727803b705cfSriastradh		min = ALIGN(min, UPLOAD_ALIGNMENT);
727903b705cfSriastradh		if (io->used != min) {
728003b705cfSriastradh			DBG(("%s: trimming buffer from %d to %d\n",
728103b705cfSriastradh			     __FUNCTION__, io->used, min));
728203b705cfSriastradh			io->used = min;
728303b705cfSriastradh		}
728403b705cfSriastradh		bo->size.bytes -= stride;
728503b705cfSriastradh	}
728603b705cfSriastradh
72879a906b70Schristos	bo->map__cpu = *ret;
728803b705cfSriastradh	bo->pitch = stride;
728903b705cfSriastradh	bo->unique_id = kgem_get_unique_id(kgem);
729003b705cfSriastradh	return bo;
729103b705cfSriastradh}
729203b705cfSriastradh
729303b705cfSriastradhstruct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
729403b705cfSriastradh					 const void *data,
729503b705cfSriastradh					 const BoxRec *box,
729603b705cfSriastradh					 int stride, int bpp)
729703b705cfSriastradh{
729803b705cfSriastradh	int width  = box->x2 - box->x1;
729903b705cfSriastradh	int height = box->y2 - box->y1;
730003b705cfSriastradh	struct kgem_bo *bo;
730103b705cfSriastradh	void *dst;
730203b705cfSriastradh
730303b705cfSriastradh	if (!kgem_can_create_2d(kgem, width, height, bpp))
730403b705cfSriastradh		return NULL;
730503b705cfSriastradh
730603b705cfSriastradh	DBG(("%s : (%d, %d), (%d, %d), stride=%d, bpp=%d\n",
730703b705cfSriastradh	     __FUNCTION__, box->x1, box->y1, box->x2, box->y2, stride, bpp));
730803b705cfSriastradh
730903b705cfSriastradh	assert(data);
731003b705cfSriastradh	assert(width > 0);
731103b705cfSriastradh	assert(height > 0);
731203b705cfSriastradh	assert(stride);
731303b705cfSriastradh	assert(bpp);
731403b705cfSriastradh
731503b705cfSriastradh	bo = kgem_create_buffer_2d(kgem,
731603b705cfSriastradh				   width, height, bpp,
731703b705cfSriastradh				   KGEM_BUFFER_WRITE_INPLACE, &dst);
73189a906b70Schristos	if (bo == NULL)
73199a906b70Schristos		return NULL;
73209a906b70Schristos
73219a906b70Schristos	if (sigtrap_get()) {
73229a906b70Schristos		kgem_bo_destroy(kgem, bo);
73239a906b70Schristos		return NULL;
73249a906b70Schristos	}
73259a906b70Schristos
73269a906b70Schristos	memcpy_blt(data, dst, bpp,
73279a906b70Schristos		   stride, bo->pitch,
73289a906b70Schristos		   box->x1, box->y1,
73299a906b70Schristos		   0, 0,
73309a906b70Schristos		   width, height);
733103b705cfSriastradh
73329a906b70Schristos	sigtrap_put();
733303b705cfSriastradh	return bo;
733403b705cfSriastradh}
733503b705cfSriastradh
733603b705cfSriastradhvoid kgem_proxy_bo_attach(struct kgem_bo *bo,
733703b705cfSriastradh			  struct kgem_bo **ptr)
733803b705cfSriastradh{
733903b705cfSriastradh	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
73409a906b70Schristos	assert(bo->map__gtt == NULL);
734103b705cfSriastradh	assert(bo->proxy);
734203b705cfSriastradh	list_add(&bo->vma, &bo->proxy->vma);
73439a906b70Schristos	bo->map__gtt = ptr;
734403b705cfSriastradh	*ptr = kgem_bo_reference(bo);
734503b705cfSriastradh}
734603b705cfSriastradh
734703b705cfSriastradhvoid kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
734803b705cfSriastradh{
734903b705cfSriastradh	struct kgem_buffer *bo;
735003b705cfSriastradh	uint32_t offset = _bo->delta, length = _bo->size.bytes;
735103b705cfSriastradh
735203b705cfSriastradh	/* We expect the caller to have already submitted the batch */
735303b705cfSriastradh	assert(_bo->io);
735403b705cfSriastradh	assert(_bo->exec == NULL);
735503b705cfSriastradh	assert(_bo->rq == NULL);
735603b705cfSriastradh	assert(_bo->proxy);
735703b705cfSriastradh
735803b705cfSriastradh	_bo = _bo->proxy;
735903b705cfSriastradh	assert(_bo->proxy == NULL);
736003b705cfSriastradh	assert(_bo->exec == NULL);
736103b705cfSriastradh
736203b705cfSriastradh	bo = (struct kgem_buffer *)_bo;
736303b705cfSriastradh
736403b705cfSriastradh	DBG(("%s(offset=%d, length=%d, snooped=%d)\n", __FUNCTION__,
736503b705cfSriastradh	     offset, length, bo->base.snoop));
736603b705cfSriastradh
736703b705cfSriastradh	if (bo->mmapped) {
736803b705cfSriastradh		struct drm_i915_gem_set_domain set_domain;
736903b705cfSriastradh
737003b705cfSriastradh		DBG(("%s: sync: needs_flush? %d, domain? %d, busy? %d\n",
737103b705cfSriastradh		     __FUNCTION__,
737203b705cfSriastradh		     bo->base.needs_flush,
737303b705cfSriastradh		     bo->base.domain,
737403b705cfSriastradh		     __kgem_busy(kgem, bo->base.handle)));
737503b705cfSriastradh
73769a906b70Schristos		assert(bo->mmapped == MMAPPED_GTT || bo->base.snoop || kgem->has_llc);
737703b705cfSriastradh
737803b705cfSriastradh		VG_CLEAR(set_domain);
737903b705cfSriastradh		set_domain.handle = bo->base.handle;
738003b705cfSriastradh		set_domain.write_domain = 0;
738103b705cfSriastradh		set_domain.read_domains =
73829a906b70Schristos			bo->mmapped == MMAPPED_CPU ? I915_GEM_DOMAIN_CPU : I915_GEM_DOMAIN_GTT;
738303b705cfSriastradh
73849a906b70Schristos		if (do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
73859a906b70Schristos			DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
73869a906b70Schristos			kgem_throttle(kgem);
73879a906b70Schristos		}
738803b705cfSriastradh	} else {
738903b705cfSriastradh		if (gem_read(kgem->fd,
739003b705cfSriastradh			     bo->base.handle, (char *)bo->mem+offset,
739103b705cfSriastradh			     offset, length))
739203b705cfSriastradh			return;
739303b705cfSriastradh	}
73949a906b70Schristos	kgem_bo_maybe_retire(kgem, &bo->base);
739503b705cfSriastradh	bo->base.domain = DOMAIN_NONE;
739603b705cfSriastradh}
739703b705cfSriastradh
739803b705cfSriastradhuint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format)
739903b705cfSriastradh{
740003b705cfSriastradh	struct kgem_bo_binding *b;
740103b705cfSriastradh
74029a906b70Schristos	assert(bo->refcnt);
74039a906b70Schristos
740403b705cfSriastradh	for (b = &bo->binding; b && b->offset; b = b->next)
740503b705cfSriastradh		if (format == b->format)
740603b705cfSriastradh			return b->offset;
740703b705cfSriastradh
740803b705cfSriastradh	return 0;
740903b705cfSriastradh}
741003b705cfSriastradh
741103b705cfSriastradhvoid kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset)
741203b705cfSriastradh{
741303b705cfSriastradh	struct kgem_bo_binding *b;
741403b705cfSriastradh
74159a906b70Schristos	assert(bo->refcnt);
74169a906b70Schristos
741703b705cfSriastradh	for (b = &bo->binding; b; b = b->next) {
741803b705cfSriastradh		if (b->offset)
741903b705cfSriastradh			continue;
742003b705cfSriastradh
742103b705cfSriastradh		b->offset = offset;
742203b705cfSriastradh		b->format = format;
742303b705cfSriastradh
742403b705cfSriastradh		if (b->next)
742503b705cfSriastradh			b->next->offset = 0;
742603b705cfSriastradh
742703b705cfSriastradh		return;
742803b705cfSriastradh	}
742903b705cfSriastradh
743003b705cfSriastradh	b = malloc(sizeof(*b));
743103b705cfSriastradh	if (b) {
743203b705cfSriastradh		b->next = bo->binding.next;
743303b705cfSriastradh		b->format = format;
743403b705cfSriastradh		b->offset = offset;
743503b705cfSriastradh		bo->binding.next = b;
743603b705cfSriastradh	}
743703b705cfSriastradh}
743803b705cfSriastradh
743903b705cfSriastradhstruct kgem_bo *
744003b705cfSriastradhkgem_replace_bo(struct kgem *kgem,
744103b705cfSriastradh		struct kgem_bo *src,
744203b705cfSriastradh		uint32_t width,
744303b705cfSriastradh		uint32_t height,
744403b705cfSriastradh		uint32_t pitch,
744503b705cfSriastradh		uint32_t bpp)
744603b705cfSriastradh{
744703b705cfSriastradh	struct kgem_bo *dst;
744803b705cfSriastradh	uint32_t br00, br13;
744903b705cfSriastradh	uint32_t handle;
745003b705cfSriastradh	uint32_t size;
745103b705cfSriastradh	uint32_t *b;
745203b705cfSriastradh
745303b705cfSriastradh	DBG(("%s: replacing bo handle=%d, size=%dx%d pitch=%d, with pitch=%d\n",
745403b705cfSriastradh	     __FUNCTION__, src->handle,  width, height, src->pitch, pitch));
745503b705cfSriastradh
745603b705cfSriastradh	/* We only expect to be called to fixup small buffers, hence why
745703b705cfSriastradh	 * we only attempt to allocate a linear bo.
745803b705cfSriastradh	 */
745903b705cfSriastradh	assert(src->tiling == I915_TILING_NONE);
74609a906b70Schristos	assert(kgem_bo_can_blt(kgem, src));
746103b705cfSriastradh
746203b705cfSriastradh	size = height * pitch;
746303b705cfSriastradh	size = NUM_PAGES(size);
746403b705cfSriastradh
746503b705cfSriastradh	dst = search_linear_cache(kgem, size, 0);
746603b705cfSriastradh	if (dst == NULL)
746703b705cfSriastradh		dst = search_linear_cache(kgem, size, CREATE_INACTIVE);
746803b705cfSriastradh	if (dst == NULL) {
746903b705cfSriastradh		handle = gem_create(kgem->fd, size);
747003b705cfSriastradh		if (handle == 0)
747103b705cfSriastradh			return NULL;
747203b705cfSriastradh
747303b705cfSriastradh		dst = __kgem_bo_alloc(handle, size);
747403b705cfSriastradh		if (dst == NULL) {
747503b705cfSriastradh			gem_close(kgem->fd, handle);
747603b705cfSriastradh			return NULL;
747703b705cfSriastradh		}
747803b705cfSriastradh
747903b705cfSriastradh		debug_alloc__bo(kgem, dst);
748003b705cfSriastradh	}
748103b705cfSriastradh	dst->pitch = pitch;
748203b705cfSriastradh	dst->unique_id = kgem_get_unique_id(kgem);
748303b705cfSriastradh	dst->refcnt = 1;
74849a906b70Schristos	assert(dst->tiling == I915_TILING_NONE);
74859a906b70Schristos	assert(kgem_bo_can_blt(kgem, dst));
748603b705cfSriastradh
748703b705cfSriastradh	kgem_set_mode(kgem, KGEM_BLT, dst);
74889a906b70Schristos	if (!kgem_check_batch(kgem, 10) ||
748903b705cfSriastradh	    !kgem_check_reloc(kgem, 2) ||
749003b705cfSriastradh	    !kgem_check_many_bo_fenced(kgem, src, dst, NULL)) {
749103b705cfSriastradh		kgem_submit(kgem);
749203b705cfSriastradh		if (!kgem_check_many_bo_fenced(kgem, src, dst, NULL)) {
749303b705cfSriastradh			kgem_bo_destroy(kgem, dst);
749403b705cfSriastradh			return NULL;
749503b705cfSriastradh		}
749603b705cfSriastradh		_kgem_set_mode(kgem, KGEM_BLT);
749703b705cfSriastradh	}
749803b705cfSriastradh
749903b705cfSriastradh	br00 = XY_SRC_COPY_BLT_CMD;
750003b705cfSriastradh	br13 = pitch;
750103b705cfSriastradh	pitch = src->pitch;
750203b705cfSriastradh	if (kgem->gen >= 040 && src->tiling) {
750303b705cfSriastradh		br00 |= BLT_SRC_TILED;
750403b705cfSriastradh		pitch >>= 2;
750503b705cfSriastradh	}
750603b705cfSriastradh
750703b705cfSriastradh	br13 |= 0xcc << 16;
750803b705cfSriastradh	switch (bpp) {
750903b705cfSriastradh	default:
751003b705cfSriastradh	case 32: br00 |= BLT_WRITE_ALPHA | BLT_WRITE_RGB;
751103b705cfSriastradh		 br13 |= 1 << 25; /* RGB8888 */
751203b705cfSriastradh	case 16: br13 |= 1 << 24; /* RGB565 */
751303b705cfSriastradh	case 8: break;
751403b705cfSriastradh	}
751503b705cfSriastradh
751603b705cfSriastradh	b = kgem->batch + kgem->nbatch;
75179a906b70Schristos	if (kgem->gen >= 0100) {
75189a906b70Schristos		b[0] = br00 | 8;
75199a906b70Schristos		b[1] = br13;
75209a906b70Schristos		b[2] = 0;
75219a906b70Schristos		b[3] = height << 16 | width;
75229a906b70Schristos		*(uint64_t *)(b+4) =
75239a906b70Schristos			kgem_add_reloc64(kgem, kgem->nbatch + 4, dst,
75249a906b70Schristos					 I915_GEM_DOMAIN_RENDER << 16 |
75259a906b70Schristos					 I915_GEM_DOMAIN_RENDER |
75269a906b70Schristos					 KGEM_RELOC_FENCED,
75279a906b70Schristos					 0);
75289a906b70Schristos		b[6] = 0;
75299a906b70Schristos		b[7] = pitch;
75309a906b70Schristos		*(uint64_t *)(b+8) =
75319a906b70Schristos			kgem_add_reloc64(kgem, kgem->nbatch + 8, src,
75329a906b70Schristos					 I915_GEM_DOMAIN_RENDER << 16 |
75339a906b70Schristos					 KGEM_RELOC_FENCED,
75349a906b70Schristos					 0);
75359a906b70Schristos		kgem->nbatch += 10;
75369a906b70Schristos	} else {
75379a906b70Schristos		b[0] = br00 | 6;
75389a906b70Schristos		b[1] = br13;
75399a906b70Schristos		b[2] = 0;
75409a906b70Schristos		b[3] = height << 16 | width;
75419a906b70Schristos		b[4] = kgem_add_reloc(kgem, kgem->nbatch + 4, dst,
75429a906b70Schristos				      I915_GEM_DOMAIN_RENDER << 16 |
75439a906b70Schristos				      I915_GEM_DOMAIN_RENDER |
75449a906b70Schristos				      KGEM_RELOC_FENCED,
75459a906b70Schristos				      0);
75469a906b70Schristos		b[5] = 0;
75479a906b70Schristos		b[6] = pitch;
75489a906b70Schristos		b[7] = kgem_add_reloc(kgem, kgem->nbatch + 7, src,
75499a906b70Schristos				      I915_GEM_DOMAIN_RENDER << 16 |
75509a906b70Schristos				      KGEM_RELOC_FENCED,
75519a906b70Schristos				      0);
75529a906b70Schristos		kgem->nbatch += 8;
75539a906b70Schristos	}
755403b705cfSriastradh
755503b705cfSriastradh	return dst;
755603b705cfSriastradh}
75579a906b70Schristos
75589a906b70Schristosbool kgem_bo_convert_to_gpu(struct kgem *kgem,
75599a906b70Schristos			    struct kgem_bo *bo,
75609a906b70Schristos			    unsigned flags)
75619a906b70Schristos{
7562813957e3Ssnj	DBG(("%s: converting handle=%d from CPU to GPU, flags=%x, busy?=%d\n",
7563813957e3Ssnj	     __FUNCTION__, bo->handle, flags, __kgem_bo_is_busy(kgem, bo)));
75649a906b70Schristos	assert(bo->tiling == I915_TILING_NONE);
75659a906b70Schristos
75669a906b70Schristos	if (kgem->has_llc)
75679a906b70Schristos		return true;
75689a906b70Schristos
75699a906b70Schristos	if (flags & MOVE_ASYNC_HINT && __kgem_bo_is_busy(kgem, bo))
75709a906b70Schristos		return false;
75719a906b70Schristos
75729a906b70Schristos	assert(bo->snoop);
75739a906b70Schristos
75749a906b70Schristos	kgem_bo_submit(kgem, bo);
75759a906b70Schristos
75769a906b70Schristos	if (!gem_set_caching(kgem->fd, bo->handle, UNCACHED))
75779a906b70Schristos		return false;
75789a906b70Schristos
75799a906b70Schristos	bo->snoop = false;
75809a906b70Schristos	return true;
75819a906b70Schristos}
7582