kgem.h revision 03b705cf
/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#ifndef KGEM_H
#define KGEM_H

#include <stdint.h>
#include <stdbool.h>
#include <stdarg.h>

#include <i915_drm.h>

#include "compiler.h"

#if HAS_DEBUG_FULL
#define DBG(x) ErrorF x
#else
#define DBG(x)
#endif

struct kgem_bo {
	struct kgem_request *rq;
#define RQ(rq) ((struct kgem_request *)((uintptr_t)(rq) & ~3))
#define RQ_RING(rq) ((uintptr_t)(rq) & 3)
#define RQ_IS_BLT(rq) (RQ_RING(rq) == KGEM_BLT)
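	/*
	 * The ring a request is executing on is packed into the low two
	 * bits of ->rq; RQ() masks them off to recover the pointer, while
	 * RQ_RING()/RQ_IS_BLT() inspect them (see kgem_bo_mark_busy()).
	 */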
	struct drm_i915_gem_exec_object2 *exec;

	struct kgem_bo *proxy;

	struct list list;
	struct list request;
	struct list vma;

	void *map;
#define IS_CPU_MAP(ptr) ((uintptr_t)(ptr) & 1)
#define IS_GTT_MAP(ptr) (ptr && ((uintptr_t)(ptr) & 1) == 0)
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
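	/*
	 * Bit 0 of ->map distinguishes a CPU mmap (set) from a GTT mmap
	 * (clear); MAP() clears the tag bits to yield the usable address.
	 */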

	struct kgem_bo_binding {
		struct kgem_bo_binding *next;
		uint32_t format;
		uint16_t offset;
	} binding;

	uint32_t unique_id;
	uint32_t refcnt;
	uint32_t handle;
	uint32_t target_handle;
	uint32_t presumed_offset;
	uint32_t delta;
	union {
		struct {
			uint32_t count:27;
#define PAGE_SIZE 4096
			uint32_t bucket:5;
#define NUM_CACHE_BUCKETS 16
#define MAX_CACHE_SIZE (1 << (NUM_CACHE_BUCKETS+12))
		} pages;
		uint32_t bytes;
	} size;
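	/*
	 * The size is stored as whole pages (plus the cache bucket) for
	 * ordinary objects, or in bytes for proxies into an upload buffer;
	 * see __kgem_bo_size() and __kgem_buffer_size() below.
	 */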
	uint32_t pitch : 18; /* max 128k */
	uint32_t tiling : 2;
	uint32_t reusable : 1;
	uint32_t gpu_dirty : 1;
	uint32_t gtt_dirty : 1;
	uint32_t domain : 2;
	uint32_t needs_flush : 1;
	uint32_t snoop : 1;
	uint32_t io : 1;
	uint32_t flush : 1;
	uint32_t scanout : 1;
	uint32_t purged : 1;
};
#define DOMAIN_NONE 0
#define DOMAIN_CPU 1
#define DOMAIN_GTT 2
#define DOMAIN_GPU 3

struct kgem_request {
	struct list list;
	struct kgem_bo *bo;
	struct list buffers;
	int ring;
};

enum {
	MAP_GTT = 0,
	MAP_CPU,
	NUM_MAP_TYPES,
};

struct kgem {
	int fd;
	int wedged;
	unsigned gen;

	uint32_t unique_id;

	enum kgem_mode {
		/* order matches I915_EXEC_RING ordering */
		KGEM_NONE = 0,
		KGEM_RENDER,
		KGEM_BSD,
		KGEM_BLT,
	} mode, ring;

	struct list flushing;
	struct list large;
	struct list large_inactive;
	struct list active[NUM_CACHE_BUCKETS][3];
	struct list inactive[NUM_CACHE_BUCKETS];
	struct list pinned_batches[2];
	struct list snoop;
	struct list scanout;
	struct list batch_buffers, active_buffers;

	struct list requests[2];
	struct kgem_request *next_request;
	struct kgem_request static_request;

	struct {
		struct list inactive[NUM_CACHE_BUCKETS];
		int16_t count;
	} vma[NUM_MAP_TYPES];

	uint32_t batch_flags;
	uint32_t batch_flags_base;
#define I915_EXEC_SECURE (1<<9)
#define LOCAL_EXEC_OBJECT_WRITE (1<<2)

	uint16_t nbatch;
	uint16_t surface;
	uint16_t nexec;
	uint16_t nreloc;
	uint16_t nreloc__self;
	uint16_t nfence;
	uint16_t batch_size;
	uint16_t min_alignment;

	uint32_t flush:1;
	uint32_t need_expire:1;
	uint32_t need_purge:1;
	uint32_t need_retire:1;
	uint32_t need_throttle:1;
	uint32_t scanout_busy:1;
	uint32_t busy:1;

	uint32_t has_create2 :1;
	uint32_t has_userptr :1;
	uint32_t has_blt :1;
	uint32_t has_relaxed_fencing :1;
	uint32_t has_relaxed_delta :1;
	uint32_t has_semaphores :1;
	uint32_t has_secure_batches :1;
	uint32_t has_pinned_batches :1;
	uint32_t has_caching :1;
	uint32_t has_llc :1;
	uint32_t has_wt :1;
	uint32_t has_no_reloc :1;
	uint32_t has_handle_lut :1;

	uint32_t can_blt_cpu :1;

	uint16_t fence_max;
	uint16_t half_cpu_cache_pages;
	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
	uint32_t aperture, aperture_fenced;
	uint32_t max_upload_tile_size, max_copy_tile_size;
	uint32_t max_gpu_size, max_cpu_size;
	uint32_t large_object_size, max_object_size;
	uint32_t buffer_size;

	void (*context_switch)(struct kgem *kgem, int new_mode);
	void (*retire)(struct kgem *kgem);
	void (*expire)(struct kgem *kgem);

	void (*memcpy_to_tiled_x)(const void *src, void *dst, int bpp,
				  int32_t src_stride, int32_t dst_stride,
				  int16_t src_x, int16_t src_y,
				  int16_t dst_x, int16_t dst_y,
				  uint16_t width, uint16_t height);
	void (*memcpy_from_tiled_x)(const void *src, void *dst, int bpp,
				    int32_t src_stride, int32_t dst_stride,
				    int16_t src_x, int16_t src_y,
				    int16_t dst_x, int16_t dst_y,
				    uint16_t width, uint16_t height);

	uint16_t reloc__self[256];
	uint32_t batch[64*1024-8] page_aligned;
	struct drm_i915_gem_exec_object2 exec[384] page_aligned;
	struct drm_i915_gem_relocation_entry reloc[8192] page_aligned;

#ifdef DEBUG_MEMORY
	struct {
		int bo_allocs;
		size_t bo_bytes;
	} debug_memory;
#endif
};

#define KGEM_MAX_DEFERRED_VBO 16

#define KGEM_BATCH_RESERVED 1
#define KGEM_RELOC_RESERVED (KGEM_MAX_DEFERRED_VBO)
#define KGEM_EXEC_RESERVED (1+KGEM_MAX_DEFERRED_VBO)

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
#endif

#define KGEM_BATCH_SIZE(K) ((K)->batch_size-KGEM_BATCH_RESERVED)
#define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED)
#define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED)
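/*
 * The *_SIZE() macros report the capacity available to callers once the
 * reserved headroom defined above (KGEM_BATCH_RESERVED, KGEM_RELOC_RESERVED,
 * KGEM_EXEC_RESERVED) has been subtracted.
 */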

void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen);
void kgem_reset(struct kgem *kgem);

struct kgem_bo *kgem_create_map(struct kgem *kgem,
				void *ptr, uint32_t size,
				bool read_only);

struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name);
struct kgem_bo *kgem_create_for_prime(struct kgem *kgem, int name, uint32_t size);
int kgem_bo_export_to_prime(struct kgem *kgem, struct kgem_bo *bo);

struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags);
struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
				  struct kgem_bo *target,
				  int offset, int length);

struct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
					 const void *data,
					 const BoxRec *box,
					 int stride, int bpp);
void kgem_proxy_bo_attach(struct kgem_bo *bo, struct kgem_bo **ptr);

int kgem_choose_tiling(struct kgem *kgem,
		       int tiling, int width, int height, int bpp);
unsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
#define KGEM_CAN_CREATE_GPU	0x1
#define KGEM_CAN_CREATE_CPU	0x2
#define KGEM_CAN_CREATE_LARGE	0x4
#define KGEM_CAN_CREATE_GTT	0x8

uint32_t kgem_get_unique_id(struct kgem *kgem);

struct kgem_bo *
kgem_replace_bo(struct kgem *kgem,
		struct kgem_bo *src,
		uint32_t width,
		uint32_t height,
		uint32_t pitch,
		uint32_t bpp);
enum {
	CREATE_EXACT = 0x1,
	CREATE_INACTIVE = 0x2,
	CREATE_CPU_MAP = 0x4,
	CREATE_GTT_MAP = 0x8,
	CREATE_SCANOUT = 0x10,
	CREATE_PRIME = 0x20,
	CREATE_TEMPORARY = 0x40,
	CREATE_CACHED = 0x80,
	CREATE_NO_RETIRE = 0x100,
	CREATE_NO_THROTTLE = 0x200,
};
struct kgem_bo *kgem_create_2d(struct kgem *kgem,
			       int width,
			       int height,
			       int bpp,
			       int tiling,
			       uint32_t flags);
struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
				   int width,
				   int height,
				   int bpp,
				   uint32_t flags);
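/*
 * Illustrative sketch only; flag and tiling choices depend on the caller,
 * and the surrounding variable names are hypothetical:
 *
 *	struct kgem_bo *bo;
 *
 *	bo = kgem_create_2d(kgem, width, height, 32,
 *			    kgem_choose_tiling(kgem, I915_TILING_X,
 *					       width, height, 32),
 *			    CREATE_INACTIVE);
 *	if (bo == NULL)
 *		return false;
 *
 *	... emit rendering referencing bo ...
 *
 *	kgem_bo_destroy(kgem, bo);
 */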

uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);

bool kgem_retire(struct kgem *kgem);

bool __kgem_ring_is_idle(struct kgem *kgem, int ring);
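/*
 * Outstanding requests are tracked per ring, with the BLT ring at index 1
 * and everything else at index 0 of kgem->requests[]; hence the
 * "ring == KGEM_BLT" conversions below.
 */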
static inline bool kgem_ring_is_idle(struct kgem *kgem, int ring)
{
	ring = ring == KGEM_BLT;

	if (list_is_empty(&kgem->requests[ring]))
		return true;

	return __kgem_ring_is_idle(kgem, ring);
}

static inline bool kgem_is_idle(struct kgem *kgem)
{
	if (!kgem->need_retire)
		return true;

	return kgem_ring_is_idle(kgem, kgem->ring);
}

static inline bool __kgem_ring_empty(struct kgem *kgem)
{
	return list_is_empty(&kgem->requests[kgem->ring == KGEM_BLT]);
}

void _kgem_submit(struct kgem *kgem);
static inline void kgem_submit(struct kgem *kgem)
{
	if (kgem->nbatch)
		_kgem_submit(kgem);
}

static inline bool kgem_flush(struct kgem *kgem, bool flush)
{
	if (kgem->nreloc == 0)
		return false;

	return (kgem->flush ^ flush) && kgem_ring_is_idle(kgem, kgem->ring);
}

static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
{
	if (bo->exec)
		_kgem_submit(kgem);
}

void kgem_scanout_flush(struct kgem *kgem, struct kgem_bo *bo);

static inline struct kgem_bo *kgem_bo_reference(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	bo->refcnt++;
	return bo;
}

void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt);
	if (--bo->refcnt == 0)
		_kgem_bo_destroy(kgem, bo);
}

void kgem_clear_dirty(struct kgem *kgem);

static inline void kgem_set_mode(struct kgem *kgem,
				 enum kgem_mode mode,
				 struct kgem_bo *bo)
{
	assert(!kgem->wedged);

#if DEBUG_FLUSH_BATCH
	kgem_submit(kgem);
#endif

	if (kgem->nreloc && bo->exec == NULL && kgem_ring_is_idle(kgem, kgem->ring))
		_kgem_submit(kgem);

	if (kgem->mode == mode)
		return;

	kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}

static inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
{
	assert(kgem->mode == KGEM_NONE);
	assert(kgem->nbatch == 0);
	assert(!kgem->wedged);
	kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}

static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords)
{
	assert(num_dwords > 0);
	assert(kgem->nbatch < kgem->surface);
	assert(kgem->surface <= kgem->batch_size);
	return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED <= kgem->surface);
}

static inline bool kgem_check_reloc(struct kgem *kgem, int n)
{
	assert(kgem->nreloc <= KGEM_RELOC_SIZE(kgem));
	return likely(kgem->nreloc + n <= KGEM_RELOC_SIZE(kgem));
}

static inline bool kgem_check_exec(struct kgem *kgem, int n)
{
	assert(kgem->nexec <= KGEM_EXEC_SIZE(kgem));
	return likely(kgem->nexec + n <= KGEM_EXEC_SIZE(kgem));
}

static inline bool kgem_check_reloc_and_exec(struct kgem *kgem, int n)
{
	return kgem_check_reloc(kgem, n) && kgem_check_exec(kgem, n);
}

static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
						  int num_dwords,
						  int num_surfaces)
{
	return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) &&
		kgem_check_reloc(kgem, num_surfaces) &&
		kgem_check_exec(kgem, num_surfaces);
}

static inline uint32_t *kgem_get_batch(struct kgem *kgem)
{
	if (kgem->nreloc) {
		unsigned mode = kgem->mode;
		_kgem_submit(kgem);
		_kgem_set_mode(kgem, mode);
	}

	return kgem->batch + kgem->nbatch;
}

bool kgem_check_bo(struct kgem *kgem, ...) __attribute__((sentinel(0)));
bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo);
bool kgem_check_many_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(0)));

#define KGEM_RELOC_FENCED 0x8000
uint32_t kgem_add_reloc(struct kgem *kgem,
			uint32_t pos,
			struct kgem_bo *bo,
			uint32_t read_write_domains,
			uint32_t delta);
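/*
 * Typical emission pattern (illustrative sketch only; the command dwords,
 * dword counts and the dst_bo name are hypothetical):
 *
 *	uint32_t *b;
 *
 *	kgem_set_mode(kgem, KGEM_BLT, dst_bo);
 *	if (!kgem_check_batch(kgem, 8) ||
 *	    !kgem_check_reloc(kgem, 1) ||
 *	    !kgem_check_bo_fenced(kgem, dst_bo)) {
 *		_kgem_submit(kgem);
 *		_kgem_set_mode(kgem, KGEM_BLT);
 *	}
 *
 *	b = kgem->batch + kgem->nbatch;
 *	b[0] = ...; command dword(s)
 *	b[1] = kgem_add_reloc(kgem, kgem->nbatch + 1, dst_bo,
 *			      I915_GEM_DOMAIN_RENDER << 16 |
 *			      I915_GEM_DOMAIN_RENDER |
 *			      KGEM_RELOC_FENCED,
 *			      0);
 *	...
 *	kgem->nbatch += 8;
 */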

void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu_full(struct kgem *kgem, struct kgem_bo *bo, bool write);
void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void __kgem_bo_unmap__cpu(struct kgem *kgem, struct kgem_bo *bo, void *ptr);
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);

bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
		   const void *data, int length);

int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
void kgem_get_tile_size(struct kgem *kgem, int tiling,
			int *tile_width, int *tile_height, int *tile_size);

static inline int __kgem_buffer_size(struct kgem_bo *bo)
{
	assert(bo->proxy != NULL);
	return bo->size.bytes;
}

static inline int __kgem_bo_size(struct kgem_bo *bo)
{
	assert(bo->proxy == NULL);
	return PAGE_SIZE * bo->size.pages.count;
}

static inline int kgem_bo_size(struct kgem_bo *bo)
{
	if (bo->proxy)
		return __kgem_buffer_size(bo);
	else
		return __kgem_bo_size(bo);
}

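/*
 * The BLT engine takes its pitch as a signed 16-bit value and, for tiled
 * surfaces on gen4+, expects it in dwords rather than bytes; hence the
 * divide by four and the MAXSHORT check below.
 */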
static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem,
					   struct kgem_bo *bo)
{
	int pitch = bo->pitch;
	if (kgem->gen >= 040 && bo->tiling)
		pitch /= 4;
	if (pitch > MAXSHORT) {
		DBG(("%s: can not blt to handle=%d, adjusted pitch=%d\n",
		     __FUNCTION__, bo->handle, pitch));
		return false;
	}

	return true;
}

static inline bool kgem_bo_can_blt(struct kgem *kgem,
				   struct kgem_bo *bo)
{
	if (bo->tiling == I915_TILING_Y) {
		DBG(("%s: can not blt to handle=%d, tiling=Y\n",
		     __FUNCTION__, bo->handle));
		return false;
	}

	return kgem_bo_blt_pitch_is_ok(kgem, bo);
}

static inline bool __kgem_bo_is_mappable(struct kgem *kgem,
					 struct kgem_bo *bo)
{
	if (bo->domain == DOMAIN_GTT)
		return true;

	if (kgem->gen < 040 && bo->tiling &&
	    bo->presumed_offset & (kgem_bo_fenced_size(kgem, bo) - 1))
		return false;

	if (kgem->gen == 021 && bo->tiling == I915_TILING_Y)
		return false;

	if (kgem->has_llc && bo->tiling == I915_TILING_NONE)
		return true;

	if (!bo->presumed_offset)
		return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;

	return bo->presumed_offset + kgem_bo_size(bo) <= kgem->aperture_mappable;
}

static inline bool kgem_bo_is_mappable(struct kgem *kgem,
				       struct kgem_bo *bo)
{
	DBG(("%s: domain=%d, offset: %d size: %d\n",
	     __FUNCTION__, bo->domain, bo->presumed_offset, kgem_bo_size(bo)));
	assert(bo->refcnt);
	return __kgem_bo_is_mappable(kgem, bo);
}

static inline bool kgem_bo_mapped(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: map=%p, tiling=%d, domain=%d\n",
	     __FUNCTION__, bo->map, bo->tiling, bo->domain));
	assert(bo->refcnt);

	if (bo->map == NULL)
		return bo->tiling == I915_TILING_NONE && bo->domain == DOMAIN_CPU;

	return IS_CPU_MAP(bo->map) == !bo->tiling;
}

static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
{
	if (kgem_bo_mapped(kgem, bo))
		return true;

	if (!bo->tiling && (kgem->has_llc || bo->domain == DOMAIN_CPU))
		return true;

	if (kgem->gen == 021 && bo->tiling == I915_TILING_Y)
		return false;

	return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;
}

static inline bool kgem_bo_can_map__cpu(struct kgem *kgem,
					struct kgem_bo *bo,
					bool write)
{
	if (bo->purged || (bo->scanout && write))
		return false;

	if (kgem->has_llc)
		return true;

	if (bo->domain != DOMAIN_CPU)
		return false;

	return !write || bo->exec == NULL;
}

static inline bool kgem_bo_is_snoop(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	while (bo->proxy)
		bo = bo->proxy;
	return bo->snoop;
}

void kgem_bo_undo(struct kgem *kgem, struct kgem_bo *bo);

bool __kgem_busy(struct kgem *kgem, int handle);

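/*
 * Record which ring the bo is being executed on in the low bits of ->rq,
 * matching the RQ()/RQ_RING() encoding in struct kgem_bo.
 */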
static inline void kgem_bo_mark_busy(struct kgem_bo *bo, int ring)
{
	bo->rq = (struct kgem_request *)((uintptr_t)bo->rq | ring);
}

inline static void __kgem_bo_clear_busy(struct kgem_bo *bo)
{
	bo->rq = NULL;
	list_del(&bo->request);

	bo->domain = DOMAIN_NONE;
	bo->needs_flush = false;
	bo->gtt_dirty = false;
}

static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->refcnt);
	return bo->rq;
}

static inline bool __kgem_bo_is_busy(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->refcnt);

	if (bo->exec)
		return true;

	if (kgem_flush(kgem, bo->flush))
		kgem_submit(kgem);

	if (bo->rq && !__kgem_busy(kgem, bo->handle))
		__kgem_bo_clear_busy(bo);

	return kgem_bo_is_busy(bo);
}

static inline bool kgem_bo_is_render(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, rq? %d [%d]\n", __FUNCTION__,
	     bo->handle, bo->rq != NULL, RQ_RING(bo->rq)));
	assert(bo->refcnt);
	return bo->rq && RQ_RING(bo->rq) == I915_EXEC_RENDER;
}

static inline void kgem_bo_mark_unreusable(struct kgem_bo *bo)
{
	while (bo->proxy) {
		bo->flush = true;
		bo = bo->proxy;
	}
	bo->flush = true;
	bo->reusable = false;
}

static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
{
	if (bo == NULL)
		return false;

	assert(bo->refcnt);
	return bo->gpu_dirty;
}

static inline void kgem_bo_unclean(struct kgem *kgem, struct kgem_bo *bo)
{
	/* The bo is outside of our control, so presume it is written to */
	bo->needs_flush = true;
	if (bo->rq == NULL)
		bo->rq = (void *)kgem;

	if (bo->domain != DOMAIN_GPU)
		bo->domain = DOMAIN_NONE;
}

static inline void __kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d (proxy? %d)\n", __FUNCTION__,
	     bo->handle, bo->proxy != NULL));

	bo->exec->flags |= LOCAL_EXEC_OBJECT_WRITE;
	bo->needs_flush = bo->gpu_dirty = true;
	list_move(&bo->request, &RQ(bo->rq)->buffers);
}

static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	do {
		assert(bo->exec);
		assert(bo->rq);

		if (bo->gpu_dirty)
			return;

		__kgem_bo_mark_dirty(bo);
	} while ((bo = bo->proxy));
}

#define KGEM_BUFFER_WRITE	0x1
#define KGEM_BUFFER_INPLACE	0x2
#define KGEM_BUFFER_LAST	0x4

#define KGEM_BUFFER_WRITE_INPLACE (KGEM_BUFFER_WRITE | KGEM_BUFFER_INPLACE)

struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
				   uint32_t size, uint32_t flags,
				   void **ret);
struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
				      int width, int height, int bpp,
				      uint32_t flags,
				      void **ret);
bool kgem_buffer_is_inplace(struct kgem_bo *bo);
void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);

void kgem_throttle(struct kgem *kgem);
#define MAX_INACTIVE_TIME 10
bool kgem_expire_cache(struct kgem *kgem);
void kgem_purge_cache(struct kgem *kgem);
void kgem_cleanup_cache(struct kgem *kgem);

void kgem_clean_scanout_cache(struct kgem *kgem);
void kgem_clean_large_cache(struct kgem *kgem);

#if HAS_DEBUG_FULL
void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch);
#else
static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)
{
	(void)kgem;
	(void)nbatch;
}
#endif

static inline void
memcpy_to_tiled_x(struct kgem *kgem,
		  const void *src, void *dst, int bpp,
		  int32_t src_stride, int32_t dst_stride,
		  int16_t src_x, int16_t src_y,
		  int16_t dst_x, int16_t dst_y,
		  uint16_t width, uint16_t height)
{
	return kgem->memcpy_to_tiled_x(src, dst, bpp,
				       src_stride, dst_stride,
				       src_x, src_y,
				       dst_x, dst_y,
				       width, height);
}

static inline void
memcpy_from_tiled_x(struct kgem *kgem,
		    const void *src, void *dst, int bpp,
		    int32_t src_stride, int32_t dst_stride,
		    int16_t src_x, int16_t src_y,
		    int16_t dst_x, int16_t dst_y,
		    uint16_t width, uint16_t height)
{
	return kgem->memcpy_from_tiled_x(src, dst, bpp,
					 src_stride, dst_stride,
					 src_x, src_y,
					 dst_x, dst_y,
					 width, height);
}

void choose_memcpy_tiled_x(struct kgem *kgem, int swizzling);

#endif /* KGEM_H */