1428d7b3dSmrg/*
2428d7b3dSmrg * Copyright (c) 2011 Intel Corporation
3428d7b3dSmrg *
4428d7b3dSmrg * Permission is hereby granted, free of charge, to any person obtaining a
5428d7b3dSmrg * copy of this software and associated documentation files (the "Software"),
6428d7b3dSmrg * to deal in the Software without restriction, including without limitation
7428d7b3dSmrg * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8428d7b3dSmrg * and/or sell copies of the Software, and to permit persons to whom the
9428d7b3dSmrg * Software is furnished to do so, subject to the following conditions:
10428d7b3dSmrg *
11428d7b3dSmrg * The above copyright notice and this permission notice (including the next
12428d7b3dSmrg * paragraph) shall be included in all copies or substantial portions of the
13428d7b3dSmrg * Software.
14428d7b3dSmrg *
15428d7b3dSmrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16428d7b3dSmrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17428d7b3dSmrg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18428d7b3dSmrg * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19428d7b3dSmrg * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20428d7b3dSmrg * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21428d7b3dSmrg * SOFTWARE.
22428d7b3dSmrg *
23428d7b3dSmrg * Authors:
24428d7b3dSmrg *    Chris Wilson <chris@chris-wilson.co.uk>
25428d7b3dSmrg *
26428d7b3dSmrg */
27428d7b3dSmrg
28428d7b3dSmrg#ifndef KGEM_H
29428d7b3dSmrg#define KGEM_H
30428d7b3dSmrg
31428d7b3dSmrg#include <stdint.h>
32428d7b3dSmrg#include <stdbool.h>
33428d7b3dSmrg#include <stdarg.h>
34428d7b3dSmrg
35428d7b3dSmrg#include <i915_drm.h>
36428d7b3dSmrg
37428d7b3dSmrg#include "compiler.h"
38428d7b3dSmrg#include "debug.h"
39428d7b3dSmrg
/* Driver-side bookkeeping for a single GEM buffer object: where it is
 * mapped, which request last used it, and how it is cached. */
struct kgem_bo {
	/* Request this bo was last queued on.  The low 2 bits encode the
	 * ring it executed on, so the pointer must always be unwrapped
	 * through RQ()/RQ_RING() before dereferencing. */
	struct kgem_request *rq;
#define RQ(rq) ((struct kgem_request *)((uintptr_t)(rq) & ~3))
#define RQ_RING(rq) ((uintptr_t)(rq) & 3)
#define RQ_IS_BLT(rq) (RQ_RING(rq) == KGEM_BLT)
#define MAKE_REQUEST(rq, ring) ((struct kgem_request *)((uintptr_t)(rq) | (ring)))

	/* Slot in the execbuf currently under construction, or NULL. */
	struct drm_i915_gem_exec_object2 *exec;

	/* Backing bo when this bo is a sub-range view (see kgem_create_proxy). */
	struct kgem_bo *proxy;

	struct list list;	/* cache/active bucket membership */
	struct list request;	/* position on its request's buffer list */
	struct list vma;	/* position on the per-map-type LRU */

	/* Cached mappings; pointers may carry flag bits in their low bits,
	 * which MAP() strips before use. */
	void *map__cpu;
	void *map__gtt;
	void *map__wc;
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))

	/* Cache of surface-state binding offsets keyed by format. */
	struct kgem_bo_binding {
		struct kgem_bo_binding *next;
		uint32_t format;
		uint16_t offset;
	} binding;

	uint64_t presumed_offset;	/* last GPU offset reported by the kernel */
	uint32_t unique_id;
	uint32_t refcnt;
	uint32_t handle;		/* kernel GEM handle */
	uint32_t target_handle;
	uint32_t delta;			/* byte offset into the proxy target — see kgem_create_proxy() */
	uint32_t active_scanout;
	union {
		/* Real bos are sized in whole pages; proxies record bytes. */
		struct {
			uint32_t count:27;
#define PAGE_SIZE 4096
			uint32_t bucket:5;
#define NUM_CACHE_BUCKETS 16
#define MAX_CACHE_SIZE (1 << (NUM_CACHE_BUCKETS+12))
		} pages;
		uint32_t bytes;
	} size;
	uint32_t pitch : 18; /* max 128k */
	uint32_t tiling : 2;
	uint32_t reusable : 1;
	uint32_t gpu_dirty : 1;
	uint32_t gtt_dirty : 1;
	uint32_t domain : 2;		/* DOMAIN_* below */
	uint32_t needs_flush : 1;
	uint32_t snoop : 1;		/* CPU-snooped (cache-coherent) pages */
	uint32_t io : 1;
	uint32_t flush : 1;
	uint32_t scanout : 1;
	uint32_t prime : 1;
	uint32_t purged : 1;
};
/* Values for the domain bitfield: which cache domain currently owns the bo. */
#define DOMAIN_NONE 0
#define DOMAIN_CPU 1
#define DOMAIN_GTT 2
#define DOMAIN_GPU 3
101428d7b3dSmrg
/* One batch submitted (or being built) for a ring: the batch bo itself
 * plus every buffer referenced by it. */
struct kgem_request {
	struct list list;	/* position on kgem->requests[ring] */
	struct kgem_bo *bo;	/* the batch buffer */
	struct list buffers;	/* bos referenced by this batch */
	int ring;
};
108428d7b3dSmrg
/* Index into the per-map-type VMA caches (kgem->vma[]). */
enum {
	MAP_GTT = 0,
	MAP_CPU,
	NUM_MAP_TYPES,
};
114428d7b3dSmrg
/* Top-level state for the kernel GEM/execbuffer interface: the batch
 * under construction, the bo caches, and detected chipset features. */
struct kgem {
	unsigned wedged;	/* non-zero once the GPU is unusable */
	int fd;			/* DRM device file descriptor */
	unsigned gen;		/* chipset generation, octal-coded (e.g. 0100 == gen8) */

	uint32_t unique_id;

	/* Cursors into the batch under construction.  Commands grow up
	 * from 0 (nbatch); surface state grows down from batch_size
	 * (surface) — see kgem_batch_space(). */
	uint16_t nbatch;
	uint16_t surface;
	uint16_t nexec;
	uint16_t nreloc;
	uint16_t nreloc__self;
	uint16_t nfence;
	uint16_t batch_size;

	uint32_t *batch;

	enum kgem_mode {
		/* order matches I915_EXEC_RING ordering */
		KGEM_NONE = 0,
		KGEM_RENDER,
		KGEM_BSD,
		KGEM_BLT,
	} mode, ring;

	/* Bo caches: active (busy on the GPU), inactive (idle, reusable),
	 * plus special-purpose lists for large, snooped and scanout bos. */
	struct list flushing;
	struct list large;
	struct list large_inactive;
	struct list active[NUM_CACHE_BUCKETS][3];
	struct list inactive[NUM_CACHE_BUCKETS];
	struct list pinned_batches[2];
	struct list snoop;
	struct list scanout;
	struct list batch_buffers, active_buffers;

	/* Outstanding requests, one list per ring (index 1 == BLT). */
	struct list requests[2];
	struct kgem_request *fence[2];
	struct kgem_request *next_request;
	struct kgem_request static_request;

	/* LRU of cached mappings, per map type. */
	struct {
		struct list inactive[NUM_CACHE_BUCKETS];
		int16_t count;
	} vma[NUM_MAP_TYPES];

	uint32_t batch_flags;
	uint32_t batch_flags_base;
#define I915_EXEC_SECURE (1<<9)
#define LOCAL_EXEC_OBJECT_WRITE (1<<2)

	/* Housekeeping flags. */
	uint32_t flush:1;
	uint32_t need_expire:1;
	uint32_t need_purge:1;
	uint32_t need_retire:1;
	uint32_t need_throttle:1;
	uint32_t needs_semaphore:1;
	uint32_t needs_reservation:1;
	uint32_t scanout_busy:1;
	uint32_t busy:1;

	/* Kernel/hardware capabilities probed at init. */
	uint32_t has_create2 :1;
	uint32_t has_userptr :1;
	uint32_t has_blt :1;
	uint32_t has_relaxed_fencing :1;
	uint32_t has_relaxed_delta :1;
	uint32_t has_semaphores :1;
	uint32_t has_secure_batches :1;
	uint32_t has_pinned_batches :1;
	uint32_t has_caching :1;
	uint32_t has_llc :1;
	uint32_t has_wt :1;
	uint32_t has_no_reloc :1;
	uint32_t has_handle_lut :1;
	uint32_t has_wc_mmap :1;

	uint32_t can_blt_cpu :1;
	uint32_t can_render_y :1;

	/* Aperture accounting and size limits. */
	uint16_t fence_max;
	uint16_t half_cpu_cache_pages;
	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable, aperture_fenceable;
	uint32_t aperture, aperture_fenced, aperture_max_fence;
	uint32_t max_upload_tile_size, max_copy_tile_size;
	uint32_t max_gpu_size, max_cpu_size;
	uint32_t large_object_size, max_object_size;
	uint32_t buffer_size;

	/* Generation-specific hooks installed at init. */
	void (*context_switch)(struct kgem *kgem, int new_mode);
	void (*retire)(struct kgem *kgem);
	void (*expire)(struct kgem *kgem);

	void (*memcpy_to_tiled_x)(const void *src, void *dst, int bpp,
				  int32_t src_stride, int32_t dst_stride,
				  int16_t src_x, int16_t src_y,
				  int16_t dst_x, int16_t dst_y,
				  uint16_t width, uint16_t height);
	void (*memcpy_from_tiled_x)(const void *src, void *dst, int bpp,
				    int32_t src_stride, int32_t dst_stride,
				    int16_t src_x, int16_t src_y,
				    int16_t dst_x, int16_t dst_y,
				    uint16_t width, uint16_t height);

	struct kgem_bo *batch_bo;

	uint16_t reloc__self[256];
	struct drm_i915_gem_exec_object2 exec[384] page_aligned;
	struct drm_i915_gem_relocation_entry reloc[8192] page_aligned;

#ifdef DEBUG_MEMORY
	struct {
		int bo_allocs;
		size_t bo_bytes;
	} debug_memory;
#endif
};
230428d7b3dSmrg
#define KGEM_MAX_DEFERRED_VBO 16

/* Slots kept free in the batch/reloc/exec arrays so a flush can always
 * append its deferred vbos and the batch terminator. */
#define KGEM_BATCH_RESERVED 1
#define KGEM_RELOC_RESERVED (KGEM_MAX_DEFERRED_VBO)
#define KGEM_EXEC_RESERVED (1+KGEM_MAX_DEFERRED_VBO)

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
#endif

/* Usable capacity of each array after subtracting the reserved slots. */
#define KGEM_BATCH_SIZE(K) ((K)->batch_size-KGEM_BATCH_RESERVED)
#define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED)
#define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED)
244428d7b3dSmrg
245428d7b3dSmrgvoid kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen);
246428d7b3dSmrgvoid kgem_reset(struct kgem *kgem);
247428d7b3dSmrg
248428d7b3dSmrgstruct kgem_bo *kgem_create_map(struct kgem *kgem,
249428d7b3dSmrg				void *ptr, uint32_t size,
250428d7b3dSmrg				bool read_only);
251428d7b3dSmrg
252428d7b3dSmrgstruct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name);
253428d7b3dSmrgstruct kgem_bo *kgem_create_for_prime(struct kgem *kgem, int name, uint32_t size);
254428d7b3dSmrgint kgem_bo_export_to_prime(struct kgem *kgem, struct kgem_bo *bo);
255428d7b3dSmrg
256428d7b3dSmrgstruct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags);
257428d7b3dSmrgstruct kgem_bo *kgem_create_proxy(struct kgem *kgem,
258428d7b3dSmrg				  struct kgem_bo *target,
259428d7b3dSmrg				  int offset, int length);
260428d7b3dSmrg
261428d7b3dSmrgstruct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
262428d7b3dSmrg					 const void *data,
263428d7b3dSmrg					 const BoxRec *box,
264428d7b3dSmrg					 int stride, int bpp);
265428d7b3dSmrgvoid kgem_proxy_bo_attach(struct kgem_bo *bo, struct kgem_bo **ptr);
266428d7b3dSmrg
267428d7b3dSmrgint kgem_choose_tiling(struct kgem *kgem,
268428d7b3dSmrg		       int tiling, int width, int height, int bpp);
269428d7b3dSmrgunsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
270428d7b3dSmrg#define KGEM_CAN_CREATE_GPU	0x1
271428d7b3dSmrg#define KGEM_CAN_CREATE_CPU	0x2
272428d7b3dSmrg#define KGEM_CAN_CREATE_LARGE	0x4
273428d7b3dSmrg#define KGEM_CAN_CREATE_GTT	0x8
274428d7b3dSmrg#define KGEM_CAN_CREATE_TILED	0x10
275428d7b3dSmrg
276428d7b3dSmrgbool kgem_check_surface_size(struct kgem *kgem,
277428d7b3dSmrg			     uint32_t width,
278428d7b3dSmrg			     uint32_t height,
279428d7b3dSmrg			     uint32_t bpp,
280428d7b3dSmrg			     uint32_t tiling,
281428d7b3dSmrg			     uint32_t pitch,
282428d7b3dSmrg			     uint32_t size);
283428d7b3dSmrg
284428d7b3dSmrgstruct kgem_bo *
285428d7b3dSmrgkgem_replace_bo(struct kgem *kgem,
286428d7b3dSmrg		struct kgem_bo *src,
287428d7b3dSmrg		uint32_t width,
288428d7b3dSmrg		uint32_t height,
289428d7b3dSmrg		uint32_t pitch,
290428d7b3dSmrg		uint32_t bpp);
291428d7b3dSmrgenum {
292428d7b3dSmrg	CREATE_EXACT = 0x1,
293428d7b3dSmrg	CREATE_INACTIVE = 0x2,
294428d7b3dSmrg	CREATE_CPU_MAP = 0x4,
295428d7b3dSmrg	CREATE_GTT_MAP = 0x8,
296428d7b3dSmrg	CREATE_SCANOUT = 0x10,
297428d7b3dSmrg	CREATE_PRIME = 0x20,
298428d7b3dSmrg	CREATE_TEMPORARY = 0x40,
299428d7b3dSmrg	CREATE_CACHED = 0x80,
300428d7b3dSmrg	CREATE_UNCACHED = 0x100,
301428d7b3dSmrg	CREATE_NO_RETIRE = 0x200,
302428d7b3dSmrg	CREATE_NO_THROTTLE = 0x400,
303428d7b3dSmrg};
304428d7b3dSmrgstruct kgem_bo *kgem_create_2d(struct kgem *kgem,
305428d7b3dSmrg			       int width,
306428d7b3dSmrg			       int height,
307428d7b3dSmrg			       int bpp,
308428d7b3dSmrg			       int tiling,
309428d7b3dSmrg			       uint32_t flags);
310428d7b3dSmrgstruct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
311428d7b3dSmrg				   int width,
312428d7b3dSmrg				   int height,
313428d7b3dSmrg				   int bpp,
314428d7b3dSmrg				   uint32_t flags);
315428d7b3dSmrg
316428d7b3dSmrgbool kgem_bo_convert_to_gpu(struct kgem *kgem,
317428d7b3dSmrg			    struct kgem_bo *bo,
318428d7b3dSmrg			    unsigned flags);
319428d7b3dSmrg
320428d7b3dSmrguint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
321428d7b3dSmrgvoid kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);
322428d7b3dSmrg
323428d7b3dSmrgbool kgem_retire(struct kgem *kgem);
324428d7b3dSmrgvoid kgem_retire__buffers(struct kgem *kgem);
325428d7b3dSmrg
326428d7b3dSmrgstatic inline bool kgem_bo_discard_cache(struct kgem_bo *bo, bool force)
327428d7b3dSmrg{
328428d7b3dSmrg	if (bo == NULL || bo->proxy == NULL)
329428d7b3dSmrg		return false;
330428d7b3dSmrg
331428d7b3dSmrg	if (force)
332428d7b3dSmrg		return true;
333428d7b3dSmrg
334428d7b3dSmrg	if (bo->proxy->rq)
335428d7b3dSmrg		return false;
336428d7b3dSmrg
337428d7b3dSmrg	return bo->snoop;
338428d7b3dSmrg}
339428d7b3dSmrg
340428d7b3dSmrgbool __kgem_ring_is_idle(struct kgem *kgem, int ring);
341428d7b3dSmrgstatic inline bool kgem_ring_is_idle(struct kgem *kgem, int ring)
342428d7b3dSmrg{
343428d7b3dSmrg	ring = ring == KGEM_BLT;
344428d7b3dSmrg
345428d7b3dSmrg	if (list_is_empty(&kgem->requests[ring]))
346428d7b3dSmrg		return true;
347428d7b3dSmrg
348428d7b3dSmrg	return __kgem_ring_is_idle(kgem, ring);
349428d7b3dSmrg}
350428d7b3dSmrg
351428d7b3dSmrgstatic inline bool kgem_is_idle(struct kgem *kgem)
352428d7b3dSmrg{
353428d7b3dSmrg	if (!kgem->need_retire)
354428d7b3dSmrg		return true;
355428d7b3dSmrg
356428d7b3dSmrg	return kgem_ring_is_idle(kgem, kgem->ring);
357428d7b3dSmrg}
358428d7b3dSmrg
359428d7b3dSmrgstatic inline bool __kgem_ring_empty(struct kgem *kgem)
360428d7b3dSmrg{
361428d7b3dSmrg	return list_is_empty(&kgem->requests[kgem->ring == KGEM_BLT]);
362428d7b3dSmrg}
363428d7b3dSmrg
364428d7b3dSmrgvoid _kgem_submit(struct kgem *kgem);
365428d7b3dSmrgstatic inline void kgem_submit(struct kgem *kgem)
366428d7b3dSmrg{
367428d7b3dSmrg	if (kgem->nbatch)
368428d7b3dSmrg		_kgem_submit(kgem);
369428d7b3dSmrg}
370428d7b3dSmrg
371428d7b3dSmrgstatic inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
372428d7b3dSmrg{
373428d7b3dSmrg	if (bo->exec == NULL)
374428d7b3dSmrg		return;
375428d7b3dSmrg
376428d7b3dSmrg	assert(bo->refcnt);
377428d7b3dSmrg	_kgem_submit(kgem);
378428d7b3dSmrg}
379428d7b3dSmrg
380428d7b3dSmrgvoid kgem_scanout_flush(struct kgem *kgem, struct kgem_bo *bo);
381428d7b3dSmrg
382428d7b3dSmrgstatic inline struct kgem_bo *kgem_bo_reference(struct kgem_bo *bo)
383428d7b3dSmrg{
384428d7b3dSmrg	assert(bo->refcnt);
385428d7b3dSmrg	bo->refcnt++;
386428d7b3dSmrg	return bo;
387428d7b3dSmrg}
388428d7b3dSmrg
389428d7b3dSmrgvoid _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
390428d7b3dSmrgstatic inline void kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
391428d7b3dSmrg{
392428d7b3dSmrg	assert(bo->refcnt);
393428d7b3dSmrg	if (--bo->refcnt == 0)
394428d7b3dSmrg		_kgem_bo_destroy(kgem, bo);
395428d7b3dSmrg}
396428d7b3dSmrg
397428d7b3dSmrgvoid kgem_clear_dirty(struct kgem *kgem);
398428d7b3dSmrg
/* Prepare the batch for emitting commands of the given mode, performing
 * a context switch if the mode changes.  As a heuristic, if relocations
 * are already queued, the target bo is not yet part of the batch, and
 * the ring has gone idle, flush first to keep the GPU fed. */
static inline void kgem_set_mode(struct kgem *kgem,
				 enum kgem_mode mode,
				 struct kgem_bo *bo)
{
	assert(!kgem->wedged);

#if DEBUG_FLUSH_BATCH
	kgem_submit(kgem);
#endif

	if (kgem->nreloc && bo->exec == NULL && kgem_ring_is_idle(kgem, kgem->ring)) {
		DBG(("%s: flushing before new bo\n", __FUNCTION__));
		_kgem_submit(kgem);
	}

	/* Already in the right mode: nothing to do. */
	if (kgem->mode == mode)
		return;

	kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}
420428d7b3dSmrg
421428d7b3dSmrgstatic inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
422428d7b3dSmrg{
423428d7b3dSmrg	assert(kgem->mode == KGEM_NONE);
424428d7b3dSmrg	assert(kgem->nbatch == 0);
425428d7b3dSmrg	assert(!kgem->wedged);
426428d7b3dSmrg	kgem->context_switch(kgem, mode);
427428d7b3dSmrg	kgem->mode = mode;
428428d7b3dSmrg}
429428d7b3dSmrg
430428d7b3dSmrgstatic inline int kgem_batch_space(struct kgem *kgem)
431428d7b3dSmrg{
432428d7b3dSmrg	int rem = kgem->surface - kgem->nbatch;
433428d7b3dSmrg	assert(rem > 0);
434428d7b3dSmrg	return rem - KGEM_BATCH_RESERVED;
435428d7b3dSmrg}
436428d7b3dSmrg
/* True if num_dwords more commands fit in the batch while keeping
 * KGEM_BATCH_RESERVED dwords free below the surface-state watermark. */
static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords)
{
	assert(num_dwords > 0);
	assert(kgem->nbatch < kgem->surface);
	assert(kgem->surface <= kgem->batch_size);
	return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED <= kgem->surface);
}
444428d7b3dSmrg
445428d7b3dSmrgstatic inline bool kgem_check_reloc(struct kgem *kgem, int n)
446428d7b3dSmrg{
447428d7b3dSmrg	assert(kgem->nreloc <= KGEM_RELOC_SIZE(kgem));
448428d7b3dSmrg	return likely(kgem->nreloc + n <= KGEM_RELOC_SIZE(kgem));
449428d7b3dSmrg}
450428d7b3dSmrg
451428d7b3dSmrgstatic inline bool kgem_check_exec(struct kgem *kgem, int n)
452428d7b3dSmrg{
453428d7b3dSmrg	assert(kgem->nexec <= KGEM_EXEC_SIZE(kgem));
454428d7b3dSmrg	return likely(kgem->nexec + n <= KGEM_EXEC_SIZE(kgem));
455428d7b3dSmrg}
456428d7b3dSmrg
457428d7b3dSmrgstatic inline bool kgem_check_reloc_and_exec(struct kgem *kgem, int n)
458428d7b3dSmrg{
459428d7b3dSmrg	return kgem_check_reloc(kgem, n) && kgem_check_exec(kgem, n);
460428d7b3dSmrg}
461428d7b3dSmrg
/* As kgem_check_batch(), but also accounts for 8 dwords of surface
 * state plus one reloc and one exec slot per requested surface. */
static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
						  int num_dwords,
						  int num_surfaces)
{
	return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) &&
		kgem_check_reloc(kgem, num_surfaces) &&
		kgem_check_exec(kgem, num_surfaces);
}
470428d7b3dSmrg
/* Return the insertion point for new batch commands, flushing the
 * current batch first if it already contains relocations (the caller
 * gets a clean batch to write into). */
static inline uint32_t *kgem_get_batch(struct kgem *kgem)
{
	if (kgem->nreloc) {
		/* preserve the emission mode across the implicit submit */
		unsigned mode = kgem->mode;
		_kgem_submit(kgem);
		_kgem_set_mode(kgem, mode);
	}

	return kgem->batch + kgem->nbatch;
}
481428d7b3dSmrg
482428d7b3dSmrgbool kgem_check_bo(struct kgem *kgem, ...) __attribute__((sentinel(0)));
483428d7b3dSmrgbool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo);
484428d7b3dSmrgbool kgem_check_many_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(0)));
485428d7b3dSmrg
486428d7b3dSmrg#define KGEM_RELOC_FENCED 0x8000
487428d7b3dSmrguint32_t kgem_add_reloc(struct kgem *kgem,
488428d7b3dSmrg			uint32_t pos,
489428d7b3dSmrg			struct kgem_bo *bo,
490428d7b3dSmrg			uint32_t read_write_domains,
491428d7b3dSmrg			uint32_t delta);
492428d7b3dSmrguint64_t kgem_add_reloc64(struct kgem *kgem,
493428d7b3dSmrg			  uint32_t pos,
494428d7b3dSmrg			  struct kgem_bo *bo,
495428d7b3dSmrg			  uint32_t read_write_domains,
496428d7b3dSmrg			  uint64_t delta);
497428d7b3dSmrg
498428d7b3dSmrgvoid *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
499428d7b3dSmrgvoid *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo);
500428d7b3dSmrgvoid *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo);
501428d7b3dSmrgvoid *kgem_bo_map__wc(struct kgem *kgem, struct kgem_bo *bo);
502428d7b3dSmrgvoid kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo);
503428d7b3dSmrgvoid *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
504428d7b3dSmrgvoid *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
505428d7b3dSmrgvoid kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
506428d7b3dSmrgvoid kgem_bo_sync__cpu_full(struct kgem *kgem, struct kgem_bo *bo, bool write);
507428d7b3dSmrguint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);
508428d7b3dSmrg
509428d7b3dSmrgbool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
510428d7b3dSmrg		   const void *data, int length);
511428d7b3dSmrg
512428d7b3dSmrgint kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
513428d7b3dSmrgvoid kgem_get_tile_size(struct kgem *kgem, int tiling, int pitch,
514428d7b3dSmrg			int *tile_width, int *tile_height, int *tile_size);
515428d7b3dSmrg
516428d7b3dSmrgstatic inline int __kgem_buffer_size(struct kgem_bo *bo)
517428d7b3dSmrg{
518428d7b3dSmrg	assert(bo->proxy != NULL);
519428d7b3dSmrg	return bo->size.bytes;
520428d7b3dSmrg}
521428d7b3dSmrg
522428d7b3dSmrgstatic inline int __kgem_bo_size(struct kgem_bo *bo)
523428d7b3dSmrg{
524428d7b3dSmrg	assert(bo->proxy == NULL);
525428d7b3dSmrg	return PAGE_SIZE * bo->size.pages.count;
526428d7b3dSmrg}
527428d7b3dSmrg
528428d7b3dSmrgstatic inline int __kgem_bo_num_pages(struct kgem_bo *bo)
529428d7b3dSmrg{
530428d7b3dSmrg	assert(bo->proxy == NULL);
531428d7b3dSmrg	return bo->size.pages.count;
532428d7b3dSmrg}
533428d7b3dSmrg
534428d7b3dSmrgstatic inline int kgem_bo_size(struct kgem_bo *bo)
535428d7b3dSmrg{
536428d7b3dSmrg	if (bo->proxy)
537428d7b3dSmrg		return __kgem_buffer_size(bo);
538428d7b3dSmrg	else
539428d7b3dSmrg		return __kgem_bo_size(bo);
540428d7b3dSmrg}
541428d7b3dSmrg
/* Can the BLT engine handle this bo's pitch?  Gen8+ cannot blt pitches
 * with bit 4 set (hardware erratum); from gen4 onwards tiled pitches
 * are programmed in dwords; and the command's pitch field is a signed
 * 16-bit value. */
static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem,
					   struct kgem_bo *bo)
{
	int pitch = bo->pitch;

	if (kgem->gen >= 0100 && pitch & (1 << 4)) { /* bdw is broken */
		DBG(("%s: can not blt to handle=%d, pitch=%d\n",
		     __FUNCTION__, bo->handle, pitch));
		return false;
	}

	/* Tiled surfaces specify pitch in dwords on gen4+. */
	if (kgem->gen >= 040 && bo->tiling)
		pitch /= 4;
	if (pitch > MAXSHORT) {
		DBG(("%s: can not blt to handle=%d, adjusted pitch=%d\n",
		     __FUNCTION__, bo->handle, pitch));
		return false;
	}

	return true;
}
563428d7b3dSmrg
/* Can the BLT engine access this bo at all?  Y-tiled surfaces are not
 * blittable, and on gen8+ a proxy whose byte offset (delta) has bit 4
 * set trips the same erratum as the pitch check. */
static inline bool kgem_bo_can_blt(struct kgem *kgem,
				   struct kgem_bo *bo)
{
	assert(bo->refcnt);

	if (bo->tiling == I915_TILING_Y) {
		DBG(("%s: can not blt to handle=%d, tiling=Y\n",
		     __FUNCTION__, bo->handle));
		return false;
	}

	if (kgem->gen >= 0100 && bo->proxy && bo->delta & (1 << 4)) {
		DBG(("%s: can not blt to handle=%d, delta=%d\n",
		     __FUNCTION__, bo->handle, bo->delta));
		return false;
	}

	return kgem_bo_blt_pitch_is_ok(kgem, bo);
}
583428d7b3dSmrg
584428d7b3dSmrgstatic inline bool kgem_bo_is_snoop(struct kgem_bo *bo)
585428d7b3dSmrg{
586428d7b3dSmrg	assert(bo->refcnt);
587428d7b3dSmrg	while (bo->proxy)
588428d7b3dSmrg		bo = bo->proxy;
589428d7b3dSmrg	return bo->snoop;
590428d7b3dSmrg}
591428d7b3dSmrg
592428d7b3dSmrgvoid kgem_bo_undo(struct kgem *kgem, struct kgem_bo *bo);
593428d7b3dSmrgvoid kgem_bo_pair_undo(struct kgem *kgem, struct kgem_bo *a, struct kgem_bo *b);
594428d7b3dSmrg
595428d7b3dSmrgbool __kgem_busy(struct kgem *kgem, int handle);
596428d7b3dSmrg
/* Mark a bo as busy on the given ring.  If it already belongs to a
 * request, just re-tag the ring bits; otherwise tag the kgem pointer
 * itself as a dummy request and park the bo on the flushing list so
 * retirement will eventually notice it. */
static inline void kgem_bo_mark_busy(struct kgem *kgem, struct kgem_bo *bo, int ring)
{
	assert(bo->refcnt);
	bo->needs_flush = true;
	if (bo->rq) {
		bo->rq = MAKE_REQUEST(RQ(bo->rq), ring);
	} else {
		/* No real request: use kgem as a sentinel "busy" marker. */
		bo->rq = MAKE_REQUEST(kgem, ring);
		list_add(&bo->request, &kgem->flushing);
		kgem->need_retire = true;
	}
}
609428d7b3dSmrg
610428d7b3dSmrginline static void __kgem_bo_clear_busy(struct kgem_bo *bo)
611428d7b3dSmrg{
612428d7b3dSmrg	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
613428d7b3dSmrg	bo->rq = NULL;
614428d7b3dSmrg	list_del(&bo->request);
615428d7b3dSmrg
616428d7b3dSmrg	bo->domain = DOMAIN_NONE;
617428d7b3dSmrg	bo->needs_flush = false;
618428d7b3dSmrg	bo->gtt_dirty = false;
619428d7b3dSmrg}
620428d7b3dSmrg
621428d7b3dSmrgstatic inline bool kgem_bo_is_busy(struct kgem_bo *bo)
622428d7b3dSmrg{
623428d7b3dSmrg	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
624428d7b3dSmrg	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
625428d7b3dSmrg	assert(bo->refcnt);
626428d7b3dSmrg	return bo->rq;
627428d7b3dSmrg}
628428d7b3dSmrg
629428d7b3dSmrgvoid __kgem_retire_requests_upto(struct kgem *kgem, struct kgem_bo *bo);
/* Like kgem_bo_is_busy(), but double-checks with the kernel: anything
 * in the batch under construction is busy by definition; otherwise, if
 * the kernel reports the bo idle, retire every request up to and
 * including the one it belonged to before answering. */
static inline bool __kgem_bo_is_busy(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->refcnt);

	if (bo->exec)
		return true;

	if (bo->rq && !__kgem_busy(kgem, bo->handle)) {
		__kgem_retire_requests_upto(kgem, bo);
		assert(list_is_empty(&bo->request));
		assert(bo->rq == NULL);
		assert(bo->domain == DOMAIN_NONE);
	}

	return kgem_bo_is_busy(bo);
}
648428d7b3dSmrg
649428d7b3dSmrgstatic inline bool kgem_bo_is_render(struct kgem_bo *bo)
650428d7b3dSmrg{
651428d7b3dSmrg	DBG(("%s: handle=%d, rq? %d [%d]\n", __FUNCTION__,
652428d7b3dSmrg	     bo->handle, bo->rq != NULL, (int)RQ_RING(bo->rq)));
653428d7b3dSmrg	assert(bo->refcnt);
654428d7b3dSmrg	return bo->rq && RQ_RING(bo->rq) == I915_EXEC_RENDER;
655428d7b3dSmrg}
656428d7b3dSmrg
657428d7b3dSmrgstatic inline void kgem_bo_mark_unreusable(struct kgem_bo *bo)
658428d7b3dSmrg{
659428d7b3dSmrg	assert(bo->refcnt);
660428d7b3dSmrg	while (bo->proxy) {
661428d7b3dSmrg		bo->flush = true;
662428d7b3dSmrg		bo = bo->proxy;
663428d7b3dSmrg		assert(bo->refcnt);
664428d7b3dSmrg	}
665428d7b3dSmrg	bo->flush = true;
666428d7b3dSmrg	bo->reusable = false;
667428d7b3dSmrg}
668428d7b3dSmrg
669428d7b3dSmrgstatic inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
670428d7b3dSmrg{
671428d7b3dSmrg	if (bo == NULL)
672428d7b3dSmrg		return false;
673428d7b3dSmrg
674428d7b3dSmrg	assert(bo->refcnt);
675428d7b3dSmrg	return bo->gpu_dirty;
676428d7b3dSmrg}
677428d7b3dSmrg
static inline void kgem_bo_unclean(struct kgem *kgem, struct kgem_bo *bo)
{
	/* The bo is outside of our control, so presume it is written to */
	bo->needs_flush = true;
	/* Use kgem itself as a sentinel "busy" request marker, the same
	 * trick as MAKE_REQUEST(kgem, ring) in kgem_bo_mark_busy(). */
	if (bo->rq == NULL)
		bo->rq = (void *)kgem;

	if (bo->domain != DOMAIN_GPU)
		bo->domain = DOMAIN_NONE;
}
688428d7b3dSmrg
/* Flag the bo as written by the current batch: set the execbuf write
 * flag, mark it dirty/needing flush, and move it onto its request's
 * buffer list.  Caller guarantees bo->exec and bo->rq are set. */
static inline void __kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d (proxy? %d)\n", __FUNCTION__,
	     bo->handle, bo->proxy != NULL));

	assert(bo->refcnt);
	assert(bo->exec);
	assert(bo->rq);

	bo->exec->flags |= LOCAL_EXEC_OBJECT_WRITE;
	bo->needs_flush = bo->gpu_dirty = true;
	list_move(&bo->request, &RQ(bo->rq)->buffers);
}
702428d7b3dSmrg
703428d7b3dSmrgstatic inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
704428d7b3dSmrg{
705428d7b3dSmrg	assert(bo->refcnt);
706428d7b3dSmrg	do {
707428d7b3dSmrg		assert(bo->exec);
708428d7b3dSmrg		assert(bo->rq);
709428d7b3dSmrg
710428d7b3dSmrg		if (bo->gpu_dirty)
711428d7b3dSmrg			return;
712428d7b3dSmrg
713428d7b3dSmrg		__kgem_bo_mark_dirty(bo);
714428d7b3dSmrg	} while ((bo = bo->proxy));
715428d7b3dSmrg}
716428d7b3dSmrg
/* Is there already a usable mapping cached for this bo?  Linear bos in
 * the CPU domain (or on LLC hardware) can use the CPU mapping; linear
 * bos may also use a WC mapping; everything else needs a GTT mapping. */
static inline bool kgem_bo_mapped(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, map=%p:%p, tiling=%d, domain=%d\n",
	     __FUNCTION__, bo->handle, bo->map__gtt, bo->map__cpu, bo->tiling, bo->domain));
	assert(bo->proxy == NULL);

	if (bo->tiling == I915_TILING_NONE && (bo->domain == DOMAIN_CPU || kgem->has_llc))
		return bo->map__cpu != NULL;

	if (bo->tiling == I915_TILING_NONE && bo->map__wc)
		return true;

	return bo->map__gtt != NULL;
}
731428d7b3dSmrg
/* Could this bo be mapped at all (as opposed to already being mapped)?
 * Untiled bos with coherent access are always mappable; otherwise we
 * need an existing or plausible GTT/WC mapping. */
static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, map=%p:%p:%p, tiling=%d, domain=%d, offset=%ld\n",
	     __FUNCTION__, bo->handle, bo->map__gtt, bo->map__wc, bo->map__cpu, bo->tiling, bo->domain, (long)bo->presumed_offset));

	if (!bo->tiling && (kgem->has_llc || bo->domain == DOMAIN_CPU))
		return true;

	assert(bo->proxy == NULL);

	if (bo->map__gtt != NULL)
		return true;

	/* NOTE(review): gen 021 apparently cannot GTT-map Y-tiled bos — confirm */
	if (kgem->gen == 021 && bo->tiling == I915_TILING_Y)
		return false;

	if (!bo->tiling && kgem->has_wc_mmap)
		return true;

	/* Only allow fresh GTT maps of bos up to a quarter of the
	 * mappable aperture, to avoid thrashing it. */
	return __kgem_bo_num_pages(bo) <= kgem->aperture_mappable / 4;
}
753428d7b3dSmrg
/* Can we access this bo coherently through a CPU mapping (for writing,
 * if write is set)?  Purged/stolen bos and writes to scanouts are out;
 * LLC makes everything coherent; otherwise the bo must already be in
 * the CPU domain and, for writes, not queued in the current batch. */
static inline bool kgem_bo_can_map__cpu(struct kgem *kgem,
					struct kgem_bo *bo,
					bool write)
{
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
	assert(bo->refcnt);

	if (bo->purged || (bo->scanout && write)) {
		DBG(("%s: no, writing to scanout? %d, or is stolen [inaccessible via CPU]? %d\n",
		     __FUNCTION__, bo->scanout && write, bo->purged));
		return false;
	}

	if (kgem->has_llc) {
		DBG(("%s: yes, has LLC and target is in LLC\n", __FUNCTION__));
		return true;
	}

	DBG(("%s: non-LLC - CPU domain? %d, clean? %d\n",
	     __FUNCTION__, bo->domain == DOMAIN_CPU, !write || bo->exec == NULL));
	if (bo->domain != DOMAIN_CPU)
		return false;

	return !write || bo->exec == NULL;
}
779428d7b3dSmrg
780428d7b3dSmrg#define KGEM_BUFFER_WRITE	0x1
781428d7b3dSmrg#define KGEM_BUFFER_INPLACE	0x2
782428d7b3dSmrg#define KGEM_BUFFER_LAST	0x4
783428d7b3dSmrg
784428d7b3dSmrg#define KGEM_BUFFER_WRITE_INPLACE (KGEM_BUFFER_WRITE | KGEM_BUFFER_INPLACE)
785428d7b3dSmrg
786428d7b3dSmrgstruct kgem_bo *kgem_create_buffer(struct kgem *kgem,
787428d7b3dSmrg				   uint32_t size, uint32_t flags,
788428d7b3dSmrg				   void **ret);
789428d7b3dSmrgstruct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
790428d7b3dSmrg				      int width, int height, int bpp,
791428d7b3dSmrg				      uint32_t flags,
792428d7b3dSmrg				      void **ret);
793428d7b3dSmrgbool kgem_buffer_is_inplace(struct kgem_bo *bo);
794428d7b3dSmrgvoid kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);
795428d7b3dSmrg
796428d7b3dSmrgint kgem_is_wedged(struct kgem *kgem);
797428d7b3dSmrgvoid kgem_throttle(struct kgem *kgem);
798428d7b3dSmrg#define MAX_INACTIVE_TIME 10
799428d7b3dSmrgbool kgem_expire_cache(struct kgem *kgem);
800428d7b3dSmrgbool kgem_cleanup_cache(struct kgem *kgem);
801428d7b3dSmrg
802428d7b3dSmrgvoid kgem_clean_scanout_cache(struct kgem *kgem);
803428d7b3dSmrgvoid kgem_clean_large_cache(struct kgem *kgem);
804428d7b3dSmrg
#if HAS_DEBUG_FULL
void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch);
#else
/* No-op stub when full batch debugging is compiled out. */
static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)
{
	(void)kgem;
	(void)nbatch;
}
#endif
814428d7b3dSmrg
815428d7b3dSmrgstatic inline void
816428d7b3dSmrgmemcpy_to_tiled_x(struct kgem *kgem,
817428d7b3dSmrg		  const void *src, void *dst, int bpp,
818428d7b3dSmrg		  int32_t src_stride, int32_t dst_stride,
819428d7b3dSmrg		  int16_t src_x, int16_t src_y,
820428d7b3dSmrg		  int16_t dst_x, int16_t dst_y,
821428d7b3dSmrg		  uint16_t width, uint16_t height)
822428d7b3dSmrg{
823428d7b3dSmrg	assert(kgem->memcpy_to_tiled_x);
824428d7b3dSmrg	assert(src_x >= 0 && src_y >= 0);
825428d7b3dSmrg	assert(dst_x >= 0 && dst_y >= 0);
826428d7b3dSmrg	assert(8*src_stride >= (src_x+width) * bpp);
827428d7b3dSmrg	assert(8*dst_stride >= (dst_x+width) * bpp);
828428d7b3dSmrg	return kgem->memcpy_to_tiled_x(src, dst, bpp,
829428d7b3dSmrg				       src_stride, dst_stride,
830428d7b3dSmrg				       src_x, src_y,
831428d7b3dSmrg				       dst_x, dst_y,
832428d7b3dSmrg				       width, height);
833428d7b3dSmrg}
834428d7b3dSmrg
835428d7b3dSmrgstatic inline void
836428d7b3dSmrgmemcpy_from_tiled_x(struct kgem *kgem,
837428d7b3dSmrg		    const void *src, void *dst, int bpp,
838428d7b3dSmrg		    int32_t src_stride, int32_t dst_stride,
839428d7b3dSmrg		    int16_t src_x, int16_t src_y,
840428d7b3dSmrg		    int16_t dst_x, int16_t dst_y,
841428d7b3dSmrg		    uint16_t width, uint16_t height)
842428d7b3dSmrg{
843428d7b3dSmrg	assert(kgem->memcpy_from_tiled_x);
844428d7b3dSmrg	assert(src_x >= 0 && src_y >= 0);
845428d7b3dSmrg	assert(dst_x >= 0 && dst_y >= 0);
846428d7b3dSmrg	assert(8*src_stride >= (src_x+width) * bpp);
847428d7b3dSmrg	assert(8*dst_stride >= (dst_x+width) * bpp);
848428d7b3dSmrg	return kgem->memcpy_from_tiled_x(src, dst, bpp,
849428d7b3dSmrg					 src_stride, dst_stride,
850428d7b3dSmrg					 src_x, src_y,
851428d7b3dSmrg					 dst_x, dst_y,
852428d7b3dSmrg					 width, height);
853428d7b3dSmrg}
854428d7b3dSmrg
855428d7b3dSmrgvoid choose_memcpy_tiled_x(struct kgem *kgem, int swizzling);
856428d7b3dSmrg
857428d7b3dSmrg#endif /* KGEM_H */
858