/* gen5_render.c revision 42542f5f */
1/*
2 * Copyright © 2006,2008,2011 Intel Corporation
3 * Copyright © 2007 Red Hat, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 * Authors:
25 *    Wang Zhenyu <zhenyu.z.wang@sna.com>
26 *    Eric Anholt <eric@anholt.net>
27 *    Carl Worth <cworth@redhat.com>
28 *    Keith Packard <keithp@keithp.com>
29 *    Chris Wilson <chris@chris-wilson.co.uk>
30 *
31 */
32
33#ifdef HAVE_CONFIG_H
34#include "config.h"
35#endif
36
37#include "sna.h"
38#include "sna_reg.h"
39#include "sna_render.h"
40#include "sna_render_inline.h"
41#include "sna_video.h"
42
43#include "brw/brw.h"
44#include "gen5_render.h"
45#include "gen4_common.h"
46#include "gen4_source.h"
47#include "gen4_vertex.h"
48
/* Compile-time switches: set a NO_/DBG_ macro to 1 to disable that path. */
#define NO_COMPOSITE 0
#define NO_COMPOSITE_SPANS 0

#define PREFER_BLT_FILL 1

#define DBG_NO_STATE_CACHE 0
#define DBG_NO_SURFACE_CACHE 0

#define ALWAYS_FLUSH 0

/* Maximum 2D surface extent handled by the gen5 3D pipeline (see too_large). */
#define MAX_3D_SIZE 8192

/* Number of 16-register GRF blocks, minus one, as encoded in kernel state. */
#define GEN5_GRF_BLOCKS(nreg)    ((nreg + 15) / 16 - 1)

/* Set up a default static partitioning of the URB, which is supposed to
 * allow anything we would want to do, at potentially lower performance.
 */
#define URB_CS_ENTRY_SIZE     1
#define URB_CS_ENTRIES	      0

#define URB_VS_ENTRY_SIZE     1
#define URB_VS_ENTRIES	      256 /* minimum of 8 */

#define URB_GS_ENTRY_SIZE     0
#define URB_GS_ENTRIES	      0

#define URB_CLIP_ENTRY_SIZE   0
#define URB_CLIP_ENTRIES      0

#define URB_SF_ENTRY_SIZE     2
#define URB_SF_ENTRIES	      64

/*
 * this program computes dA/dx and dA/dy for the texture coordinates along
 * with the base texture coordinate. It was extracted from the Mesa driver
 */

/* Register counts and thread limits for the SF and PS (WM) kernels. */
#define SF_KERNEL_NUM_GRF  16
#define SF_MAX_THREADS	   48

#define PS_KERNEL_NUM_GRF   32
#define PS_MAX_THREADS	    72
91
/* Precompiled pixel shader for packed-YUV video: sample as ARGB, then
 * convert YUV->RGB and write out.  Built from the .g5b binary fragments. */
static const uint32_t ps_kernel_packed_static[][4] = {
#include "exa_wm_xy.g5b"
#include "exa_wm_src_affine.g5b"
#include "exa_wm_src_sample_argb.g5b"
#include "exa_wm_yuv_rgb.g5b"
#include "exa_wm_write.g5b"
};
99
/* Precompiled pixel shader for planar-YUV video: sample the separate
 * Y/U/V planes, then convert YUV->RGB and write out. */
static const uint32_t ps_kernel_planar_static[][4] = {
#include "exa_wm_xy.g5b"
#include "exa_wm_src_affine.g5b"
#include "exa_wm_src_sample_planar.g5b"
#include "exa_wm_yuv_rgb.g5b"
#include "exa_wm_write.g5b"
};
107
108#define NOKERNEL(kernel_enum, func, masked) \
109    [kernel_enum] = {func, 0, masked}
110#define KERNEL(kernel_enum, kernel, masked) \
111    [kernel_enum] = {&kernel, sizeof(kernel), masked}
112static const struct wm_kernel_info {
113	const void *data;
114	unsigned int size;
115	bool has_mask;
116} wm_kernels[] = {
117	NOKERNEL(WM_KERNEL, brw_wm_kernel__affine, false),
118	NOKERNEL(WM_KERNEL_P, brw_wm_kernel__projective, false),
119
120	NOKERNEL(WM_KERNEL_MASK, brw_wm_kernel__affine_mask, true),
121	NOKERNEL(WM_KERNEL_MASK_P, brw_wm_kernel__projective_mask, true),
122
123	NOKERNEL(WM_KERNEL_MASKCA, brw_wm_kernel__affine_mask_ca, true),
124	NOKERNEL(WM_KERNEL_MASKCA_P, brw_wm_kernel__projective_mask_ca, true),
125
126	NOKERNEL(WM_KERNEL_MASKSA, brw_wm_kernel__affine_mask_sa, true),
127	NOKERNEL(WM_KERNEL_MASKSA_P, brw_wm_kernel__projective_mask_sa, true),
128
129	NOKERNEL(WM_KERNEL_OPACITY, brw_wm_kernel__affine_opacity, true),
130	NOKERNEL(WM_KERNEL_OPACITY_P, brw_wm_kernel__projective_opacity, true),
131
132	KERNEL(WM_KERNEL_VIDEO_PLANAR, ps_kernel_planar_static, false),
133	KERNEL(WM_KERNEL_VIDEO_PACKED, ps_kernel_packed_static, false),
134};
135#undef KERNEL
136
/* Hardware blend factors for each Render PictOp, indexed by operator.
 * src_alpha records whether the operator's destination factor consumes
 * source alpha — relevant for the component-alpha fix-up in
 * gen5_get_blend(). */
static const struct blendinfo {
	bool src_alpha;
	uint32_t src_blend;
	uint32_t dst_blend;
} gen5_blend_op[] = {
	/* Clear */	{0, GEN5_BLENDFACTOR_ZERO, GEN5_BLENDFACTOR_ZERO},
	/* Src */	{0, GEN5_BLENDFACTOR_ONE, GEN5_BLENDFACTOR_ZERO},
	/* Dst */	{0, GEN5_BLENDFACTOR_ZERO, GEN5_BLENDFACTOR_ONE},
	/* Over */	{1, GEN5_BLENDFACTOR_ONE, GEN5_BLENDFACTOR_INV_SRC_ALPHA},
	/* OverReverse */ {0, GEN5_BLENDFACTOR_INV_DST_ALPHA, GEN5_BLENDFACTOR_ONE},
	/* In */	{0, GEN5_BLENDFACTOR_DST_ALPHA, GEN5_BLENDFACTOR_ZERO},
	/* InReverse */	{1, GEN5_BLENDFACTOR_ZERO, GEN5_BLENDFACTOR_SRC_ALPHA},
	/* Out */	{0, GEN5_BLENDFACTOR_INV_DST_ALPHA, GEN5_BLENDFACTOR_ZERO},
	/* OutReverse */ {1, GEN5_BLENDFACTOR_ZERO, GEN5_BLENDFACTOR_INV_SRC_ALPHA},
	/* Atop */	{1, GEN5_BLENDFACTOR_DST_ALPHA, GEN5_BLENDFACTOR_INV_SRC_ALPHA},
	/* AtopReverse */ {1, GEN5_BLENDFACTOR_INV_DST_ALPHA, GEN5_BLENDFACTOR_SRC_ALPHA},
	/* Xor */	{1, GEN5_BLENDFACTOR_INV_DST_ALPHA, GEN5_BLENDFACTOR_INV_SRC_ALPHA},
	/* Add */	{0, GEN5_BLENDFACTOR_ONE, GEN5_BLENDFACTOR_ONE},
};
156
/**
 * Highest-valued BLENDFACTOR used in gen5_blend_op.
 *
 * This leaves out GEN5_BLENDFACTOR_INV_DST_COLOR,
 * GEN5_BLENDFACTOR_INV_CONST_{COLOR,ALPHA},
 * GEN5_BLENDFACTOR_INV_SRC1_{COLOR,ALPHA}
 */
#define GEN5_BLENDFACTOR_COUNT (GEN5_BLENDFACTOR_INV_DST_ALPHA + 1)

/* Byte offset of the 64-byte-aligned CC state for a (src, dst) factor pair. */
#define BLEND_OFFSET(s, d) \
	(((s) * GEN5_BLENDFACTOR_COUNT + (d)) * 64)

/* Byte offset of the 64-byte-aligned sampler/WM state for a
 * (src filter, src extend, mask filter, mask extend, kernel) tuple. */
#define SAMPLER_OFFSET(sf, se, mf, me, k) \
	((((((sf) * EXTEND_COUNT + (se)) * FILTER_COUNT + (mf)) * EXTEND_COUNT + (me)) * KERNEL_COUNT + (k)) * 64)

/* Forward declaration: needed by gen5_magic_ca_pass() below. */
static bool
gen5_emit_pipelined_pointers(struct sna *sna,
			     const struct sna_composite_op *op,
			     int blend, int kernel);

/* Shorthand emitters; all assume a local 'sna' variable is in scope. */
#define OUT_BATCH(v) batch_emit(sna, v)
#define OUT_VERTEX(x,y) vertex_emit_2s(sna, x,y)
#define OUT_VERTEX_F(v) vertex_emit(sna, v)
180
181static inline bool too_large(int width, int height)
182{
183	return width > MAX_3D_SIZE || height > MAX_3D_SIZE;
184}
185
186static int
187gen5_choose_composite_kernel(int op, bool has_mask, bool is_ca, bool is_affine)
188{
189	int base;
190
191	if (has_mask) {
192		if (is_ca) {
193			if (gen5_blend_op[op].src_alpha)
194				base = WM_KERNEL_MASKSA;
195			else
196				base = WM_KERNEL_MASKCA;
197		} else
198			base = WM_KERNEL_MASK;
199	} else
200		base = WM_KERNEL;
201
202	return base + !is_affine;
203}
204
/* Second rendering pass used for component-alpha ("CA fixup"): switch the
 * pipeline to a PictOpAdd configuration and replay the vertices emitted
 * since vertex_start.  Returns true if a pass was emitted. */
static bool gen5_magic_ca_pass(struct sna *sna,
			       const struct sna_composite_op *op)
{
	struct gen5_render_state *state = &sna->render_state.gen5;

	if (!op->need_magic_ca_pass)
		return false;

	assert(sna->render.vertex_index > sna->render.vertex_start);

	DBG(("%s: CA fixup\n", __FUNCTION__));
	assert(op->mask.bo != NULL);
	assert(op->has_component_alpha);

	/* Rebind blend/kernel state for the Add pass. */
	gen5_emit_pipelined_pointers
		(sna, op, PictOpAdd,
		 gen5_choose_composite_kernel(PictOpAdd,
					      true, true, op->is_affine));

	/* Re-issue a RECTLIST primitive covering the already-emitted range. */
	OUT_BATCH(GEN5_3DPRIMITIVE |
		  GEN5_3DPRIMITIVE_VERTEX_SEQUENTIAL |
		  (_3DPRIM_RECTLIST << GEN5_3DPRIMITIVE_TOPOLOGY_SHIFT) |
		  (0 << 9) |
		  4);
	OUT_BATCH(sna->render.vertex_index - sna->render.vertex_start);
	OUT_BATCH(sna->render.vertex_start);
	OUT_BATCH(1);	/* single instance */
	OUT_BATCH(0);	/* start instance location */
	OUT_BATCH(0);	/* index buffer offset, ignored */

	state->last_primitive = sna->kgem.nbatch;
	return true;
}
238
239static uint32_t gen5_get_blend(int op,
240			       bool has_component_alpha,
241			       uint32_t dst_format)
242{
243	uint32_t src, dst;
244
245	src = gen5_blend_op[op].src_blend;
246	dst = gen5_blend_op[op].dst_blend;
247
248	/* If there's no dst alpha channel, adjust the blend op so that we'll treat
249	 * it as always 1.
250	 */
251	if (PICT_FORMAT_A(dst_format) == 0) {
252		if (src == GEN5_BLENDFACTOR_DST_ALPHA)
253			src = GEN5_BLENDFACTOR_ONE;
254		else if (src == GEN5_BLENDFACTOR_INV_DST_ALPHA)
255			src = GEN5_BLENDFACTOR_ZERO;
256	}
257
258	/* If the source alpha is being used, then we should only be in a
259	 * case where the source blend factor is 0, and the source blend
260	 * value is the mask channels multiplied by the source picture's alpha.
261	 */
262	if (has_component_alpha && gen5_blend_op[op].src_alpha) {
263		if (dst == GEN5_BLENDFACTOR_SRC_ALPHA)
264			dst = GEN5_BLENDFACTOR_SRC_COLOR;
265		else if (dst == GEN5_BLENDFACTOR_INV_SRC_ALPHA)
266			dst = GEN5_BLENDFACTOR_INV_SRC_COLOR;
267	}
268
269	DBG(("blend op=%d, dst=%x [A=%d] => src=%d, dst=%d => offset=%x\n",
270	     op, dst_format, PICT_FORMAT_A(dst_format),
271	     src, dst, BLEND_OFFSET(src, dst)));
272	return BLEND_OFFSET(src, dst);
273}
274
275static uint32_t gen5_get_card_format(PictFormat format)
276{
277	switch (format) {
278	default:
279		return -1;
280	case PICT_a8r8g8b8:
281		return GEN5_SURFACEFORMAT_B8G8R8A8_UNORM;
282	case PICT_x8r8g8b8:
283		return GEN5_SURFACEFORMAT_B8G8R8X8_UNORM;
284	case PICT_a8b8g8r8:
285		return GEN5_SURFACEFORMAT_R8G8B8A8_UNORM;
286	case PICT_x8b8g8r8:
287		return GEN5_SURFACEFORMAT_R8G8B8X8_UNORM;
288#ifdef PICT_a2r10g10b10
289	case PICT_a2r10g10b10:
290		return GEN5_SURFACEFORMAT_B10G10R10A2_UNORM;
291	case PICT_x2r10g10b10:
292		return GEN5_SURFACEFORMAT_B10G10R10X2_UNORM;
293#endif
294	case PICT_r8g8b8:
295		return GEN5_SURFACEFORMAT_R8G8B8_UNORM;
296	case PICT_r5g6b5:
297		return GEN5_SURFACEFORMAT_B5G6R5_UNORM;
298	case PICT_a1r5g5b5:
299		return GEN5_SURFACEFORMAT_B5G5R5A1_UNORM;
300	case PICT_a8:
301		return GEN5_SURFACEFORMAT_A8_UNORM;
302	case PICT_a4r4g4b4:
303		return GEN5_SURFACEFORMAT_B4G4R4A4_UNORM;
304	}
305}
306
307static uint32_t gen5_get_dest_format(PictFormat format)
308{
309	switch (format) {
310	default:
311		return -1;
312	case PICT_a8r8g8b8:
313	case PICT_x8r8g8b8:
314		return GEN5_SURFACEFORMAT_B8G8R8A8_UNORM;
315	case PICT_a8b8g8r8:
316	case PICT_x8b8g8r8:
317		return GEN5_SURFACEFORMAT_R8G8B8A8_UNORM;
318#ifdef PICT_a2r10g10b10
319	case PICT_a2r10g10b10:
320	case PICT_x2r10g10b10:
321		return GEN5_SURFACEFORMAT_B10G10R10A2_UNORM;
322#endif
323	case PICT_r5g6b5:
324		return GEN5_SURFACEFORMAT_B5G6R5_UNORM;
325	case PICT_x1r5g5b5:
326	case PICT_a1r5g5b5:
327		return GEN5_SURFACEFORMAT_B5G5R5A1_UNORM;
328	case PICT_a8:
329		return GEN5_SURFACEFORMAT_A8_UNORM;
330	case PICT_a4r4g4b4:
331	case PICT_x4r4g4b4:
332		return GEN5_SURFACEFORMAT_B4G4R4A4_UNORM;
333	}
334}
335
336static bool gen5_check_dst_format(PictFormat format)
337{
338	if (gen5_get_dest_format(format) != -1)
339		return true;
340
341	DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format));
342	return false;
343}
344
/* Report whether the format is usable as a texture source. */
static bool gen5_check_format(uint32_t format)
{
	bool supported = gen5_get_card_format(format) != -1;

	if (!supported)
		DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format));

	return supported;
}
353
/* Surface state padded out to 32 bytes so slots can be carved from the
 * batch at a fixed stride (see gen5_bind_bo). */
typedef struct gen5_surface_state_padded {
	struct gen5_surface_state state;
	char pad[32 - sizeof(struct gen5_surface_state)];
} gen5_surface_state_padded;
358
/* Reserve a zeroed 64-byte block in the static stream; only the space is
 * needed, so the returned mapping is intentionally discarded. */
static void null_create(struct sna_static_stream *stream)
{
	/* A bunch of zeros useful for legacy border color and depth-stencil */
	sna_static_stream_map(stream, 64, 64);
}
364
365static void
366sampler_state_init(struct gen5_sampler_state *sampler_state,
367		   sampler_filter_t filter,
368		   sampler_extend_t extend)
369{
370	sampler_state->ss0.lod_preclamp = 1;	/* GL mode */
371
372	/* We use the legacy mode to get the semantics specified by
373	 * the Render extension. */
374	sampler_state->ss0.border_color_mode = GEN5_BORDER_COLOR_MODE_LEGACY;
375
376	switch (filter) {
377	default:
378	case SAMPLER_FILTER_NEAREST:
379		sampler_state->ss0.min_filter = GEN5_MAPFILTER_NEAREST;
380		sampler_state->ss0.mag_filter = GEN5_MAPFILTER_NEAREST;
381		break;
382	case SAMPLER_FILTER_BILINEAR:
383		sampler_state->ss0.min_filter = GEN5_MAPFILTER_LINEAR;
384		sampler_state->ss0.mag_filter = GEN5_MAPFILTER_LINEAR;
385		break;
386	}
387
388	switch (extend) {
389	default:
390	case SAMPLER_EXTEND_NONE:
391		sampler_state->ss1.r_wrap_mode = GEN5_TEXCOORDMODE_CLAMP_BORDER;
392		sampler_state->ss1.s_wrap_mode = GEN5_TEXCOORDMODE_CLAMP_BORDER;
393		sampler_state->ss1.t_wrap_mode = GEN5_TEXCOORDMODE_CLAMP_BORDER;
394		break;
395	case SAMPLER_EXTEND_REPEAT:
396		sampler_state->ss1.r_wrap_mode = GEN5_TEXCOORDMODE_WRAP;
397		sampler_state->ss1.s_wrap_mode = GEN5_TEXCOORDMODE_WRAP;
398		sampler_state->ss1.t_wrap_mode = GEN5_TEXCOORDMODE_WRAP;
399		break;
400	case SAMPLER_EXTEND_PAD:
401		sampler_state->ss1.r_wrap_mode = GEN5_TEXCOORDMODE_CLAMP;
402		sampler_state->ss1.s_wrap_mode = GEN5_TEXCOORDMODE_CLAMP;
403		sampler_state->ss1.t_wrap_mode = GEN5_TEXCOORDMODE_CLAMP;
404		break;
405	case SAMPLER_EXTEND_REFLECT:
406		sampler_state->ss1.r_wrap_mode = GEN5_TEXCOORDMODE_MIRROR;
407		sampler_state->ss1.s_wrap_mode = GEN5_TEXCOORDMODE_MIRROR;
408		sampler_state->ss1.t_wrap_mode = GEN5_TEXCOORDMODE_MIRROR;
409		break;
410	}
411}
412
413static uint32_t gen5_filter(uint32_t filter)
414{
415	switch (filter) {
416	default:
417		assert(0);
418	case PictFilterNearest:
419		return SAMPLER_FILTER_NEAREST;
420	case PictFilterBilinear:
421		return SAMPLER_FILTER_BILINEAR;
422	}
423}
424
425static uint32_t gen5_check_filter(PicturePtr picture)
426{
427	switch (picture->filter) {
428	case PictFilterNearest:
429	case PictFilterBilinear:
430		return true;
431	default:
432		DBG(("%s: unknown filter: %x\n", __FUNCTION__, picture->filter));
433		return false;
434	}
435}
436
437static uint32_t gen5_repeat(uint32_t repeat)
438{
439	switch (repeat) {
440	default:
441		assert(0);
442	case RepeatNone:
443		return SAMPLER_EXTEND_NONE;
444	case RepeatNormal:
445		return SAMPLER_EXTEND_REPEAT;
446	case RepeatPad:
447		return SAMPLER_EXTEND_PAD;
448	case RepeatReflect:
449		return SAMPLER_EXTEND_REFLECT;
450	}
451}
452
453static bool gen5_check_repeat(PicturePtr picture)
454{
455	if (!picture->repeat)
456		return true;
457
458	switch (picture->repeatType) {
459	case RepeatNone:
460	case RepeatNormal:
461	case RepeatPad:
462	case RepeatReflect:
463		return true;
464	default:
465		DBG(("%s: unknown repeat: %x\n",
466		     __FUNCTION__, picture->repeatType));
467		return false;
468	}
469}
470
471static uint32_t
472gen5_tiling_bits(uint32_t tiling)
473{
474	switch (tiling) {
475	default: assert(0);
476	case I915_TILING_NONE: return 0;
477	case I915_TILING_X: return GEN5_SURFACE_TILED;
478	case I915_TILING_Y: return GEN5_SURFACE_TILED | GEN5_SURFACE_TILED_Y;
479	}
480}
481
/**
 * Sets up the common fields for a surface state buffer for the given
 * picture in the given surface state buffer.
 *
 * Returns the byte offset of the (possibly cached) surface state within
 * the batch.  Surface-state slots are carved from the top of the batch
 * downwards via sna->kgem.surface.
 */
static uint32_t
gen5_bind_bo(struct sna *sna,
	     struct kgem_bo *bo,
	     uint32_t width,
	     uint32_t height,
	     uint32_t format,
	     bool is_dst)
{
	uint32_t domains;
	uint16_t offset;
	uint32_t *ss;

	/* After the first bind, we manage the cache domains within the batch */
	if (!DBG_NO_SURFACE_CACHE) {
		/* Cache key encodes is_dst in the top bit so read-only and
		 * writable views of the same bo never alias. */
		offset = kgem_bo_get_binding(bo, format | is_dst << 31);
		if (offset) {
			if (is_dst)
				kgem_bo_mark_dirty(bo);
			assert(offset >= sna->kgem.surface);
			return offset * sizeof(uint32_t);
		}
	}

	/* Carve a fresh surface-state slot from the top of the batch. */
	offset = sna->kgem.surface -=
		sizeof(struct gen5_surface_state_padded) / sizeof(uint32_t);
	ss = sna->kgem.batch + offset;

	ss[0] = (GEN5_SURFACE_2D << GEN5_SURFACE_TYPE_SHIFT |
		 GEN5_SURFACE_BLEND_ENABLED |
		 format << GEN5_SURFACE_FORMAT_SHIFT);

	if (is_dst) {
		ss[0] |= GEN5_SURFACE_RC_READ_WRITE;
		domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
	} else
		domains = I915_GEM_DOMAIN_SAMPLER << 16;
	/* Surface base address: patched via relocation at execbuffer time. */
	ss[1] = kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);

	ss[2] = ((width - 1)  << GEN5_SURFACE_WIDTH_SHIFT |
		 (height - 1) << GEN5_SURFACE_HEIGHT_SHIFT);
	ss[3] = (gen5_tiling_bits(bo->tiling) |
		 (bo->pitch - 1) << GEN5_SURFACE_PITCH_SHIFT);
	ss[4] = 0;
	ss[5] = 0;

	/* Remember this binding for reuse later in the same batch. */
	kgem_bo_set_binding(bo, format | is_dst << 31, offset);

	DBG(("[%x] bind bo(handle=%d, addr=%d), format=%d, width=%d, height=%d, pitch=%d, tiling=%d -> %s\n",
	     offset, bo->handle, ss[1],
	     format, width, height, bo->pitch, bo->tiling,
	     domains & 0xffff ? "render" : "sampler"));

	return offset * sizeof(uint32_t);
}
540
/* Emit 3DSTATE_VERTEX_BUFFERS for the vertex layout identified by ve_id.
 * The buffer address dword is left as 0 and patched later via the
 * vertex_reloc record once the vbo is finalised. */
static void gen5_emit_vertex_buffer(struct sna *sna,
				    const struct sna_composite_op *op)
{
	int id = op->u.gen5.ve_id;

	assert((sna->render.vb_id & (1 << id)) == 0);

	OUT_BATCH(GEN5_3DSTATE_VERTEX_BUFFERS | 3);
	OUT_BATCH(id << VB0_BUFFER_INDEX_SHIFT | VB0_VERTEXDATA |
		  (4*op->floats_per_vertex << VB0_BUFFER_PITCH_SHIFT));
	/* Record the position of the address dword for later patching. */
	assert(sna->render.nvertex_reloc < ARRAY_SIZE(sna->render.vertex_reloc));
	sna->render.vertex_reloc[sna->render.nvertex_reloc++] = sna->kgem.nbatch;
	OUT_BATCH(0);
	OUT_BATCH(~0); /* max address: disabled */
	OUT_BATCH(0);

	/* Mark this layout as bound for the remainder of the batch. */
	sna->render.vb_id |= 1 << id;
}
559
/* Start a RECTLIST 3DPRIMITIVE, or reopen the previous one when it was
 * the last packet emitted, so additional rectangles append in place. */
static void gen5_emit_primitive(struct sna *sna)
{
	if (sna->kgem.nbatch == sna->render_state.gen5.last_primitive) {
		/* The vertex-count dword of the prior packet sits 5 dwords
		 * back from the current batch pointer; keep filling it. */
		sna->render.vertex_offset = sna->kgem.nbatch - 5;
		return;
	}

	OUT_BATCH(GEN5_3DPRIMITIVE |
		  GEN5_3DPRIMITIVE_VERTEX_SEQUENTIAL |
		  (_3DPRIM_RECTLIST << GEN5_3DPRIMITIVE_TOPOLOGY_SHIFT) |
		  (0 << 9) |
		  4);
	sna->render.vertex_offset = sna->kgem.nbatch;
	OUT_BATCH(0);	/* vertex count, to be filled in later */
	OUT_BATCH(sna->render.vertex_index);
	OUT_BATCH(1);	/* single instance */
	OUT_BATCH(0);	/* start instance location */
	OUT_BATCH(0);	/* index buffer offset, ignored */
	sna->render.vertex_start = sna->render.vertex_index;

	sna->render_state.gen5.last_primitive = sna->kgem.nbatch;
}
582
/* Ensure a vertex buffer is bound and a primitive is open before
 * emitting a rectangle.  Returns false when the batch lacks space and
 * the caller must flush first. */
static bool gen5_rectangle_begin(struct sna *sna,
				 const struct sna_composite_op *op)
{
	int id = op->u.gen5.ve_id;
	int ndwords;

	if (sna_vertex_wait__locked(&sna->render) && sna->render.vertex_offset)
		return true;

	/* Worst-case dwords: primitive (plus CA replay if pending), and a
	 * vertex-buffer packet if this layout is not yet bound. */
	ndwords = op->need_magic_ca_pass ? 20 : 6;
	if ((sna->render.vb_id & (1 << id)) == 0)
		ndwords += 5;

	if (!kgem_check_batch(&sna->kgem, ndwords))
		return false;

	if ((sna->render.vb_id & (1 << id)) == 0)
		gen5_emit_vertex_buffer(sna, op);
	if (sna->render.vertex_offset == 0)
		gen5_emit_primitive(sna);

	return true;
}
606
/* Slow path when the vbo is full: flush the pending vertices (including a
 * component-alpha replay if needed) and obtain fresh vertex space.
 * Returns the floats now available, or 0 if the batch must be submitted. */
static int gen5_get_rectangles__flush(struct sna *sna,
				      const struct sna_composite_op *op)
{
	/* Preventing discarding new vbo after lock contention */
	if (sna_vertex_wait__locked(&sna->render)) {
		int rem = vertex_space(sna);
		if (rem > op->floats_per_rect)
			return rem;
	}

	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 40 : 6))
		return 0;
	if (!kgem_check_reloc_and_exec(&sna->kgem, 2))
		return 0;

	if (sna->render.vertex_offset) {
		gen4_vertex_flush(sna);
		/* The CA pass switches pipeline state; restore ours after. */
		if (gen5_magic_ca_pass(sna, op))
			gen5_emit_pipelined_pointers(sna, op, op->op,
						     op->u.gen5.wm_kernel);
	}

	return gen4_vertex_finish(sna);
}
631
/* Reserve vertex space for up to 'want' rectangles (3 vertices each),
 * flushing the vbo/batch and re-emitting state via emit_state() as
 * required.  Returns the number of rectangles actually granted. */
inline static int gen5_get_rectangles(struct sna *sna,
				      const struct sna_composite_op *op,
				      int want,
				      void (*emit_state)(struct sna *sna,
							 const struct sna_composite_op *op))
{
	int rem;

	assert(want);

start:
	rem = vertex_space(sna);
	if (unlikely(rem < op->floats_per_rect)) {
		DBG(("flushing vbo for %s: %d < %d\n",
		     __FUNCTION__, rem, op->floats_per_rect));
		rem = gen5_get_rectangles__flush(sna, op);
		if (unlikely (rem == 0))
			goto flush;
	}

	if (unlikely(sna->render.vertex_offset == 0)) {
		if (!gen5_rectangle_begin(sna, op))
			goto flush;
		else
			goto start;
	}

	assert(rem <= vertex_space(sna));
	assert(op->floats_per_rect <= rem);
	/* Clamp the request to whatever fits in the remaining space. */
	if (want > 1 && want * op->floats_per_rect > rem)
		want = rem / op->floats_per_rect;

	sna->render.vertex_index += 3*want;
	return want;

flush:
	/* Batch is full: flush everything pending, submit, re-emit state
	 * and retry from the top. */
	if (sna->render.vertex_offset) {
		gen4_vertex_flush(sna);
		gen5_magic_ca_pass(sna, op);
	}
	sna_vertex_wait__locked(&sna->render);
	_kgem_submit(&sna->kgem);
	emit_state(sna, op);
	goto start;
}
677
/* Allocate a zeroed binding-table slot from the top of the batch; the
 * caller fills it with surface-state offsets.  *offset receives the slot
 * position in dwords. */
static uint32_t *
gen5_composite_get_binding_table(struct sna *sna,
				 uint16_t *offset)
{
	sna->kgem.surface -=
		sizeof(struct gen5_surface_state_padded) / sizeof(uint32_t);

	DBG(("%s(%x)\n", __FUNCTION__, 4*sna->kgem.surface));

	/* Clear all surplus entries to zero in case of prefetch */
	*offset = sna->kgem.surface;
	return memset(sna->kgem.batch + sna->kgem.surface,
		      0, sizeof(struct gen5_surface_state_padded));
}
692
/* Program the static URB partitioning (URB_FENCE) and the constant-buffer
 * allocation (CS_URB_STATE), using the URB_*_ENTRIES/ENTRY_SIZE layout
 * defined at the top of this file. */
static void
gen5_emit_urb(struct sna *sna)
{
	int urb_vs_start, urb_vs_size;
	int urb_gs_start, urb_gs_size;
	int urb_clip_start, urb_clip_size;
	int urb_sf_start, urb_sf_size;
	int urb_cs_start, urb_cs_size;

	/* Sections are laid out back-to-back: VS, GS, CLIP, SF, CS. */
	urb_vs_start = 0;
	urb_vs_size = URB_VS_ENTRIES * URB_VS_ENTRY_SIZE;
	urb_gs_start = urb_vs_start + urb_vs_size;
	urb_gs_size = URB_GS_ENTRIES * URB_GS_ENTRY_SIZE;
	urb_clip_start = urb_gs_start + urb_gs_size;
	urb_clip_size = URB_CLIP_ENTRIES * URB_CLIP_ENTRY_SIZE;
	urb_sf_start = urb_clip_start + urb_clip_size;
	urb_sf_size = URB_SF_ENTRIES * URB_SF_ENTRY_SIZE;
	urb_cs_start = urb_sf_start + urb_sf_size;
	urb_cs_size = URB_CS_ENTRIES * URB_CS_ENTRY_SIZE;

	OUT_BATCH(GEN5_URB_FENCE |
		  UF0_CS_REALLOC |
		  UF0_SF_REALLOC |
		  UF0_CLIP_REALLOC |
		  UF0_GS_REALLOC |
		  UF0_VS_REALLOC |
		  1);
	/* Each fence is the end offset of the corresponding section. */
	OUT_BATCH(((urb_clip_start + urb_clip_size) << UF1_CLIP_FENCE_SHIFT) |
		  ((urb_gs_start + urb_gs_size) << UF1_GS_FENCE_SHIFT) |
		  ((urb_vs_start + urb_vs_size) << UF1_VS_FENCE_SHIFT));
	OUT_BATCH(((urb_cs_start + urb_cs_size) << UF2_CS_FENCE_SHIFT) |
		  ((urb_sf_start + urb_sf_size) << UF2_SF_FENCE_SHIFT));

	/* Constant buffer state */
	OUT_BATCH(GEN5_CS_URB_STATE | 0);
	OUT_BATCH((URB_CS_ENTRY_SIZE - 1) << 4 | URB_CS_ENTRIES << 0);
}
730
/* Emit STATE_BASE_ADDRESS: general and instruction state come from the
 * pre-built general_bo; surface state lives in the batch itself (NULL
 * relocation target); media is unused and all upper bounds disabled. */
static void
gen5_emit_state_base_address(struct sna *sna)
{
	assert(sna->render_state.gen5.general_bo->proxy == NULL);
	OUT_BATCH(GEN5_STATE_BASE_ADDRESS | 6);
	OUT_BATCH(kgem_add_reloc(&sna->kgem, /* general */
				 sna->kgem.nbatch,
				 sna->render_state.gen5.general_bo,
				 I915_GEM_DOMAIN_INSTRUCTION << 16,
				 BASE_ADDRESS_MODIFY));
	OUT_BATCH(kgem_add_reloc(&sna->kgem, /* surface */
				 sna->kgem.nbatch,
				 NULL,
				 I915_GEM_DOMAIN_INSTRUCTION << 16,
				 BASE_ADDRESS_MODIFY));
	OUT_BATCH(0); /* media */
	OUT_BATCH(kgem_add_reloc(&sna->kgem, /* instruction */
				 sna->kgem.nbatch,
				 sna->render_state.gen5.general_bo,
				 I915_GEM_DOMAIN_INSTRUCTION << 16,
				 BASE_ADDRESS_MODIFY));

	/* upper bounds, all disabled */
	OUT_BATCH(BASE_ADDRESS_MODIFY);
	OUT_BATCH(0);
	OUT_BATCH(BASE_ADDRESS_MODIFY);
}
758
/* Emit the once-per-batch invariant state: pipeline select and state base
 * addresses, preceded by an MI_FLUSH if the batch already holds work. */
static void
gen5_emit_invariant(struct sna *sna)
{
	/* Ironlake errata workaround: Before disabling the clipper,
	 * you have to MI_FLUSH to get the pipeline idle.
	 *
	 * However, the kernel flushes the pipeline between batches,
	 * so we should be safe....
	 *
	 * On the other hand, after using BLT we must use a non-pipelined
	 * operation...
	 */
	if (sna->kgem.nreloc)
		OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH);

	OUT_BATCH(GEN5_PIPELINE_SELECT | PIPELINE_SELECT_3D);

	gen5_emit_state_base_address(sna);

	sna->render_state.gen5.needs_invariant = false;
}
780
/* Switch the batch to RENDER mode and guarantee room for a full state
 * emission (150 dwords, 4 surface states), submitting first if needed. */
static void
gen5_get_batch(struct sna *sna, const struct sna_composite_op *op)
{
	kgem_set_mode(&sna->kgem, KGEM_RENDER, op->dst.bo);

	if (!kgem_check_batch_with_surfaces(&sna->kgem, 150, 4)) {
		DBG(("%s: flushing batch: %d < %d+%d\n",
		     __FUNCTION__, sna->kgem.surface - sna->kgem.nbatch,
		     150, 4*8));
		kgem_submit(&sna->kgem);
		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
	}

	/* A fresh batch needs the invariant state re-emitted. */
	if (sna->render_state.gen5.needs_invariant)
		gen5_emit_invariant(sna);
}
797
/* Realign the vertex buffer whenever the vertex stride (floats per
 * vertex) changes between consecutive operations. */
static void
gen5_align_vertex(struct sna *sna, const struct sna_composite_op *op)
{
	assert(op->floats_per_rect == 3*op->floats_per_vertex);
	if (op->floats_per_vertex != sna->render_state.gen5.floats_per_vertex) {
		DBG(("aligning vertex: was %d, now %d floats per vertex\n",
		     sna->render_state.gen5.floats_per_vertex,
		     op->floats_per_vertex));
		gen4_vertex_align(sna, op);
		sna->render_state.gen5.floats_per_vertex = op->floats_per_vertex;
	}
}
810
/* Point the pixel shader at a new binding table, skipping the packet when
 * the cached state already matches. */
static void
gen5_emit_binding_table(struct sna *sna, uint16_t offset)
{
	if (!DBG_NO_STATE_CACHE &&
	    sna->render_state.gen5.surface_table == offset)
		return;

	sna->render_state.gen5.surface_table = offset;

	/* Binding table pointers */
	OUT_BATCH(GEN5_3DSTATE_BINDING_TABLE_POINTERS | 4);
	OUT_BATCH(0);		/* vs */
	OUT_BATCH(0);		/* gs */
	OUT_BATCH(0);		/* clip */
	OUT_BATCH(0);		/* sf */
	/* Only the PS uses the binding table */
	OUT_BATCH(offset*4);
}
829
/* Select the fixed-function unit state (VS/GS/CLIP/SF/WM/CC) matching this
 * operation's sampler and blend configuration, skipping the packet when
 * cached.  Returns true when the CC (blend) offset actually changed —
 * callers may need to follow that with a pipeline flush. */
static bool
gen5_emit_pipelined_pointers(struct sna *sna,
			     const struct sna_composite_op *op,
			     int blend, int kernel)
{
	uint16_t sp, bp;
	uint32_t key;

	DBG(("%s: has_mask=%d, src=(%d, %d), mask=(%d, %d),kernel=%d, blend=%d, ca=%d, format=%x\n",
	     __FUNCTION__, op->u.gen5.ve_id & 2,
	     op->src.filter, op->src.repeat,
	     op->mask.filter, op->mask.repeat,
	     kernel, blend, op->has_component_alpha, (int)op->dst.format));

	/* Offsets into the pre-generated sampler/WM and CC state arrays. */
	sp = SAMPLER_OFFSET(op->src.filter, op->src.repeat,
			    op->mask.filter, op->mask.repeat,
			    kernel);
	bp = gen5_get_blend(blend, op->has_component_alpha, op->dst.format);

	/* Cache key: sampler offset, blend offset and mask presence. */
	key = sp | (uint32_t)bp << 16 | (op->mask.bo != NULL) << 31;
	DBG(("%s: sp=%d, bp=%d, key=%08x (current sp=%d, bp=%d, key=%08x)\n",
	     __FUNCTION__, sp, bp, key,
	     sna->render_state.gen5.last_pipelined_pointers & 0xffff,
	     (sna->render_state.gen5.last_pipelined_pointers >> 16) & 0x7fff,
	     sna->render_state.gen5.last_pipelined_pointers));
	if (key == sna->render_state.gen5.last_pipelined_pointers)
		return false;

	OUT_BATCH(GEN5_3DSTATE_PIPELINED_POINTERS | 5);
	OUT_BATCH(sna->render_state.gen5.vs);
	OUT_BATCH(GEN5_GS_DISABLE); /* passthrough */
	OUT_BATCH(GEN5_CLIP_DISABLE); /* passthrough */
	OUT_BATCH(sna->render_state.gen5.sf[op->mask.bo != NULL]);
	OUT_BATCH(sna->render_state.gen5.wm + sp);
	OUT_BATCH(sna->render_state.gen5.cc + bp);

	/* bp is reused as the return value: did the CC offset change? */
	bp = (sna->render_state.gen5.last_pipelined_pointers & 0x7fff0000) != ((uint32_t)bp << 16);
	sna->render_state.gen5.last_pipelined_pointers = key;

	gen5_emit_urb(sna);

	return bp;
}
873
/* Emit 3DSTATE_DRAWING_RECTANGLE for the destination.  Returns true when
 * the packet was actually emitted (cached state was stale). */
static bool
gen5_emit_drawing_rectangle(struct sna *sna, const struct sna_composite_op *op)
{
	/* Packed y:x pairs: clip max (inclusive) and drawing origin. */
	uint32_t limit = (op->dst.height - 1) << 16 | (op->dst.width - 1);
	uint32_t offset = (uint16_t)op->dst.y << 16 | (uint16_t)op->dst.x;

	assert(!too_large(op->dst.x, op->dst.y));
	assert(!too_large(op->dst.width, op->dst.height));

	if (!DBG_NO_STATE_CACHE &&
	    sna->render_state.gen5.drawrect_limit == limit &&
	    sna->render_state.gen5.drawrect_offset == offset)
		return false;

	sna->render_state.gen5.drawrect_offset = offset;
	sna->render_state.gen5.drawrect_limit = limit;

	OUT_BATCH(GEN5_3DSTATE_DRAWING_RECTANGLE | (4 - 2));
	OUT_BATCH(0x00000000);
	OUT_BATCH(limit);
	OUT_BATCH(offset);
	return true;
}
897
/* Program 3DSTATE_VERTEX_ELEMENTS for the layout encoded in ve_id:
 * bits 0-1 give the number of floats per source texture coordinate
 * (0 => packed 16-bit ints), bits 2 and up give the mask coordinate
 * float count (0 => no mask channel).  Skipped when cached. */
static void
gen5_emit_vertex_elements(struct sna *sna,
			  const struct sna_composite_op *op)
{
	/*
	 * vertex data in vertex buffer
	 *    position: (x, y)
	 *    texture coordinate 0: (u0, v0) if (is_affine is true) else (u0, v0, w0)
	 *    texture coordinate 1 if (has_mask is true): same as above
	 */
	struct gen5_render_state *render = &sna->render_state.gen5;
	int id = op->u.gen5.ve_id;
	bool has_mask = id >> 2;
	uint32_t format, dw;

	if (!DBG_NO_STATE_CACHE && render->ve_id == id)
		return;

	DBG(("%s: changing %d -> %d\n", __FUNCTION__, render->ve_id, id));
	render->ve_id = id;

	/* The VUE layout
	 *    dword 0-3: pad (0.0, 0.0, 0.0. 0.0)
	 *    dword 4-7: position (x, y, 1.0, 1.0),
	 *    dword 8-11: texture coordinate 0 (u0, v0, w0, 1.0)
	 *    dword 12-15: texture coordinate 1 (u1, v1, w1, 1.0)
	 *
	 * dword 4-15 are fetched from vertex buffer
	 */
	OUT_BATCH(GEN5_3DSTATE_VERTEX_ELEMENTS |
		((2 * (has_mask ? 4 : 3)) + 1 - 2));

	/* Element 0: the constant-zero pad (nothing read from the buffer). */
	OUT_BATCH((id << VE0_VERTEX_BUFFER_INDEX_SHIFT) | VE0_VALID |
		  (GEN5_SURFACEFORMAT_R32G32B32A32_FLOAT << VE0_FORMAT_SHIFT) |
		  (0 << VE0_OFFSET_SHIFT));
	OUT_BATCH((VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_0_SHIFT) |
		  (VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT) |
		  (VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT) |
		  (VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_3_SHIFT));

	/* x,y */
	OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
		  GEN5_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
		  0 << VE0_OFFSET_SHIFT);
	OUT_BATCH(VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
		  VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
		  VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT |
		  VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);

	/* u0, v0, w0 */
	DBG(("%s: id=%d, first channel %d floats, offset=4b\n", __FUNCTION__,
	     id, id & 3));
	dw = VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT;
	switch (id & 3) {
	default:
		assert(0);
	case 0:
		/* Packed 16-bit integer coordinates. */
		format = GEN5_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
		dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT;
		break;
	case 1:
		format = GEN5_SURFACEFORMAT_R32_FLOAT << VE0_FORMAT_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
		dw |= VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT;
		dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT;
		break;
	case 2:
		format = GEN5_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
		dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT;
		break;
	case 3:
		/* Projective: (u, v, w). */
		format = GEN5_SURFACEFORMAT_R32G32B32_FLOAT << VE0_FORMAT_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_2_SHIFT;
		break;
	}
	OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
		  format | 4 << VE0_OFFSET_SHIFT);
	OUT_BATCH(dw);

	/* u1, v1, w1 */
	if (has_mask) {
		/* Mask coords follow the source coords in the vertex. */
		unsigned offset = 4 + ((id & 3) ?: 1) * sizeof(float);
		DBG(("%s: id=%x, second channel %d floats, offset=%db\n", __FUNCTION__,
		     id, id >> 2, offset));
		dw = VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT;
		switch (id >> 2) {
		case 1:
			format = GEN5_SURFACEFORMAT_R32_FLOAT << VE0_FORMAT_SHIFT;
			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
			dw |= VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT;
			dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT;
			break;
		default:
			assert(0);
		case 2:
			format = GEN5_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT;
			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
			dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT;
			break;
		case 3:
			format = GEN5_SURFACEFORMAT_R32G32B32_FLOAT << VE0_FORMAT_SHIFT;
			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_2_SHIFT;
			break;
		}
		OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
			  format | offset << VE0_OFFSET_SHIFT);
		OUT_BATCH(dw);
	}
}
1016
inline static void
gen5_emit_pipe_flush(struct sna *sna)
{
	/* Flush the render pipeline so previous writes are visible to
	 * subsequent reads.  The live path emits a PIPE_CONTROL with a
	 * write-cache flush; the #else branch is an MI_FLUSH variant
	 * kept for experimentation.
	 */
#if 1
	OUT_BATCH(GEN5_PIPE_CONTROL |
		  GEN5_PIPE_CONTROL_WC_FLUSH |
		  (4 - 2));	/* command length field: 4 dwords total */
	OUT_BATCH(0);	/* no post-sync write address */
	OUT_BATCH(0);
	OUT_BATCH(0);
#else
	OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH);
#endif
}
1031
static void
gen5_emit_state(struct sna *sna,
		const struct sna_composite_op *op,
		uint16_t offset)
{
	/* Emit the per-operation render state.  The low bit of "offset"
	 * is borrowed as a destination-dirty flag by the callers (see
	 * gen5_bind_surfaces / gen5_video_bind_surfaces).
	 */
	bool flush = false;

	assert(op->dst.bo->exec);

	/* drawrect must be first for Ironlake BLT workaround */
	if (gen5_emit_drawing_rectangle(sna, op))
		offset &= ~1;
	gen5_emit_binding_table(sna, offset & ~1);
	if (gen5_emit_pipelined_pointers(sna, op, op->op, op->u.gen5.wm_kernel)){
		DBG(("%s: changed blend state, flush required? %d\n",
		     __FUNCTION__, (offset & 1) && op->op > PictOpSrc));
		/* A blend-state change while the destination is dirty
		 * requires the outstanding rendering to be flushed before
		 * it can be blended against.
		 */
		flush = (offset & 1) && op->op > PictOpSrc;
	}
	gen5_emit_vertex_elements(sna, op);

	if (ALWAYS_FLUSH || kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
		DBG(("%s: flushing dirty (%d, %d)\n", __FUNCTION__,
		     kgem_bo_is_dirty(op->src.bo),
		     kgem_bo_is_dirty(op->mask.bo)));
		/* MI_FLUSH subsumes the conditional pipe flush below */
		OUT_BATCH(MI_FLUSH);
		kgem_clear_dirty(&sna->kgem);
		kgem_bo_mark_dirty(op->dst.bo);
		flush = false;
	}
	if (flush) {
		DBG(("%s: forcing flush\n", __FUNCTION__));
		gen5_emit_pipe_flush(sna);
	}
}
1066
static void gen5_bind_surfaces(struct sna *sna,
			       const struct sna_composite_op *op)
{
	/* Build the binding table for a composite operation (dst, src
	 * and optional mask surfaces) and emit the dependent state.
	 */
	bool dirty = kgem_bo_is_dirty(op->dst.bo);
	uint32_t *binding_table;
	uint16_t offset;

	gen5_get_batch(sna, op);

	binding_table = gen5_composite_get_binding_table(sna, &offset);

	binding_table[0] =
		gen5_bind_bo(sna,
			    op->dst.bo, op->dst.width, op->dst.height,
			    gen5_get_dest_format(op->dst.format),
			    true);
	binding_table[1] =
		gen5_bind_bo(sna,
			     op->src.bo, op->src.width, op->src.height,
			     op->src.card_format,
			     false);
	if (op->mask.bo) {
		assert(op->u.gen5.ve_id >> 2);
		binding_table[2] =
			gen5_bind_bo(sna,
				     op->mask.bo,
				     op->mask.width,
				     op->mask.height,
				     op->mask.card_format,
				     false);
	}

	/* If the freshly built table is identical to the previous one,
	 * reuse the cached table and give back the batch space that was
	 * just reserved for the new entries.
	 */
	if (sna->kgem.surface == offset &&
	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen5.surface_table) == *(uint64_t*)binding_table &&
	    (op->mask.bo == NULL ||
	     sna->kgem.batch[sna->render_state.gen5.surface_table+2] == binding_table[2])) {
		sna->kgem.surface += sizeof(struct gen5_surface_state_padded) / sizeof(uint32_t);
		offset = sna->render_state.gen5.surface_table;
	}

	/* Low bit of offset carries the destination-dirty flag */
	gen5_emit_state(sna, op, offset | dirty);
}
1109
/* Emit a single composite rectangle: reserve vertex space (rebinding
 * surfaces if a new batch was required) and delegate to the
 * operation's vertex emitter.
 */
fastcall static void
gen5_render_composite_blt(struct sna *sna,
			  const struct sna_composite_op *op,
			  const struct sna_composite_rectangles *r)
{
	DBG(("%s: src=(%d, %d)+(%d, %d), mask=(%d, %d)+(%d, %d), dst=(%d, %d)+(%d, %d), size=(%d, %d)\n",
	     __FUNCTION__,
	     r->src.x, r->src.y, op->src.offset[0], op->src.offset[1],
	     r->mask.x, r->mask.y, op->mask.offset[0], op->mask.offset[1],
	     r->dst.x, r->dst.y, op->dst.x, op->dst.y,
	     r->width, r->height));

	gen5_get_rectangles(sna, op, 1, gen5_bind_surfaces);
	op->prim_emit(sna, op, r);
}
1125
1126fastcall static void
1127gen5_render_composite_box(struct sna *sna,
1128			  const struct sna_composite_op *op,
1129			  const BoxRec *box)
1130{
1131	struct sna_composite_rectangles r;
1132
1133	DBG(("  %s: (%d, %d), (%d, %d)\n",
1134	     __FUNCTION__,
1135	     box->x1, box->y1, box->x2, box->y2));
1136
1137	gen5_get_rectangles(sna, op, 1, gen5_bind_surfaces);
1138
1139	r.dst.x = box->x1;
1140	r.dst.y = box->y1;
1141	r.width  = box->x2 - box->x1;
1142	r.height = box->y2 - box->y1;
1143	r.mask = r.src = r.dst;
1144
1145	op->prim_emit(sna, op, &r);
1146}
1147
1148static void
1149gen5_render_composite_boxes__blt(struct sna *sna,
1150				 const struct sna_composite_op *op,
1151				 const BoxRec *box, int nbox)
1152{
1153	DBG(("%s(%d) delta=(%d, %d), src=(%d, %d)/(%d, %d), mask=(%d, %d)/(%d, %d)\n",
1154	     __FUNCTION__, nbox, op->dst.x, op->dst.y,
1155	     op->src.offset[0], op->src.offset[1],
1156	     op->src.width, op->src.height,
1157	     op->mask.offset[0], op->mask.offset[1],
1158	     op->mask.width, op->mask.height));
1159
1160	do {
1161		int nbox_this_time;
1162
1163		nbox_this_time = gen5_get_rectangles(sna, op, nbox,
1164						     gen5_bind_surfaces);
1165		nbox -= nbox_this_time;
1166
1167		do {
1168			struct sna_composite_rectangles r;
1169
1170			DBG(("  %s: (%d, %d), (%d, %d)\n",
1171			     __FUNCTION__,
1172			     box->x1, box->y1, box->x2, box->y2));
1173
1174			r.dst.x = box->x1;
1175			r.dst.y = box->y1;
1176			r.width  = box->x2 - box->x1;
1177			r.height = box->y2 - box->y1;
1178			r.mask = r.src = r.dst;
1179			op->prim_emit(sna, op, &r);
1180			box++;
1181		} while (--nbox_this_time);
1182	} while (nbox);
1183}
1184
1185static void
1186gen5_render_composite_boxes(struct sna *sna,
1187			    const struct sna_composite_op *op,
1188			    const BoxRec *box, int nbox)
1189{
1190	DBG(("%s: nbox=%d\n", __FUNCTION__, nbox));
1191
1192	do {
1193		int nbox_this_time;
1194		float *v;
1195
1196		nbox_this_time = gen5_get_rectangles(sna, op, nbox,
1197						     gen5_bind_surfaces);
1198		assert(nbox_this_time);
1199		nbox -= nbox_this_time;
1200
1201		v = sna->render.vertices + sna->render.vertex_used;
1202		sna->render.vertex_used += nbox_this_time * op->floats_per_rect;
1203
1204		op->emit_boxes(op, box, nbox_this_time, v);
1205		box += nbox_this_time;
1206	} while (nbox);
1207}
1208
/* Threaded variant of gen5_render_composite_boxes: the vertex buffer
 * space is reserved under the render lock, then the (potentially slow)
 * vertex generation runs outside the lock while holding an acquire
 * reference so the buffer cannot be recycled underneath us.
 */
static void
gen5_render_composite_boxes__thread(struct sna *sna,
				    const struct sna_composite_op *op,
				    const BoxRec *box, int nbox)
{
	DBG(("%s: nbox=%d\n", __FUNCTION__, nbox));

	sna_vertex_lock(&sna->render);
	do {
		int nbox_this_time;
		float *v;

		nbox_this_time = gen5_get_rectangles(sna, op, nbox,
						     gen5_bind_surfaces);
		assert(nbox_this_time);
		nbox -= nbox_this_time;

		/* Reserve the vertex range while the lock is held... */
		v = sna->render.vertices + sna->render.vertex_used;
		sna->render.vertex_used += nbox_this_time * op->floats_per_rect;

		sna_vertex_acquire__locked(&sna->render);
		sna_vertex_unlock(&sna->render);

		/* ...and fill it without blocking other threads */
		op->emit_boxes(op, box, nbox_this_time, v);
		box += nbox_this_time;

		sna_vertex_lock(&sna->render);
		sna_vertex_release__locked(&sna->render);
	} while (nbox);
	sna_vertex_unlock(&sna->render);
}
1240
#ifndef MAX
/* NOTE: evaluates its arguments more than once -- no side effects */
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#endif
1244
/* Write a SURFACE_STATE entry into the batch describing one plane of a
 * video frame and return its byte offset for use in the binding table.
 */
static uint32_t gen5_bind_video_source(struct sna *sna,
				       struct kgem_bo *src_bo,
				       uint32_t src_offset,
				       int src_width,
				       int src_height,
				       int src_pitch,
				       uint32_t src_surf_format)
{
	struct gen5_surface_state *ss;

	/* Surface state is allocated downwards from the end of the batch */
	sna->kgem.surface -= sizeof(struct gen5_surface_state_padded) / sizeof(uint32_t);

	ss = memset(sna->kgem.batch + sna->kgem.surface, 0, sizeof(*ss));
	ss->ss0.surface_type = GEN5_SURFACE_2D;
	ss->ss0.surface_format = src_surf_format;
	ss->ss0.color_blend = 1;

	/* Relocation: base address = bo + per-plane byte offset */
	ss->ss1.base_addr =
		kgem_add_reloc(&sna->kgem,
			       sna->kgem.surface + 1,
			       src_bo,
			       I915_GEM_DOMAIN_SAMPLER << 16,
			       src_offset);

	/* Hardware uses size-minus-one encodings */
	ss->ss2.width  = src_width - 1;
	ss->ss2.height = src_height - 1;
	ss->ss3.pitch  = src_pitch - 1;

	return sna->kgem.surface * sizeof(uint32_t);
}
1275
/* Build the binding table for a video blit: the destination surface
 * followed by one sampler surface per source plane (six for planar
 * formats -- the shader samples each plane twice -- one for packed).
 */
static void gen5_video_bind_surfaces(struct sna *sna,
				     const struct sna_composite_op *op)
{
	bool dirty = kgem_bo_is_dirty(op->dst.bo);
	struct sna_video_frame *frame = op->priv;
	uint32_t src_surf_format;
	uint32_t src_surf_base[6];
	int src_width[6];
	int src_height[6];
	int src_pitch[6];
	uint32_t *binding_table;
	uint16_t offset;
	int n_src, n;

	/* Per-plane byte offsets into the frame bo: Y at 0, then V, then U */
	src_surf_base[0] = 0;
	src_surf_base[1] = 0;
	src_surf_base[2] = frame->VBufOffset;
	src_surf_base[3] = frame->VBufOffset;
	src_surf_base[4] = frame->UBufOffset;
	src_surf_base[5] = frame->UBufOffset;

	if (is_planar_fourcc(frame->id)) {
		/* Planar: three 8bpp planes, chroma at half resolution.
		 * NOTE(review): luma pitch comes from pitch[1] and chroma
		 * from pitch[0] -- matches sna_video_frame layout, verify
		 * against sna_video.c when changing.
		 */
		src_surf_format = GEN5_SURFACEFORMAT_R8_UNORM;
		src_width[1]  = src_width[0]  = frame->width;
		src_height[1] = src_height[0] = frame->height;
		src_pitch[1]  = src_pitch[0]  = frame->pitch[1];
		src_width[4]  = src_width[5]  = src_width[2]  = src_width[3] =
			frame->width / 2;
		src_height[4] = src_height[5] = src_height[2] = src_height[3] =
			frame->height / 2;
		src_pitch[4]  = src_pitch[5]  = src_pitch[2]  = src_pitch[3] =
			frame->pitch[0];
		n_src = 6;
	} else {
		/* Packed YUV: a single interleaved surface */
		if (frame->id == FOURCC_UYVY)
			src_surf_format = GEN5_SURFACEFORMAT_YCRCB_SWAPY;
		else
			src_surf_format = GEN5_SURFACEFORMAT_YCRCB_NORMAL;

		src_width[0]  = frame->width;
		src_height[0] = frame->height;
		src_pitch[0]  = frame->pitch[0];
		n_src = 1;
	}

	gen5_get_batch(sna, op);

	binding_table = gen5_composite_get_binding_table(sna, &offset);
	binding_table[0] =
		gen5_bind_bo(sna,
			     op->dst.bo, op->dst.width, op->dst.height,
			     gen5_get_dest_format(op->dst.format),
			     true);
	for (n = 0; n < n_src; n++) {
		binding_table[1+n] =
			gen5_bind_video_source(sna,
					       frame->bo,
					       src_surf_base[n],
					       src_width[n],
					       src_height[n],
					       src_pitch[n],
					       src_surf_format);
	}

	/* Low bit of offset carries the destination-dirty flag */
	gen5_emit_state(sna, op, offset | dirty);
}
1342
/* Render a video frame into the destination pixmap, one textured
 * rectangle per clip box.  Colour conversion is done by the WM kernel
 * (planar or packed variant).  Returns false if the operation cannot
 * fit in the GPU queue even after a submit.
 */
static bool
gen5_render_video(struct sna *sna,
		  struct sna_video *video,
		  struct sna_video_frame *frame,
		  RegionPtr dstRegion,
		  PixmapPtr pixmap)
{
	struct sna_composite_op tmp;
	struct sna_pixmap *priv = sna_pixmap(pixmap);
	int dst_width = dstRegion->extents.x2 - dstRegion->extents.x1;
	int dst_height = dstRegion->extents.y2 - dstRegion->extents.y1;
	int src_width = frame->src.x2 - frame->src.x1;
	int src_height = frame->src.y2 - frame->src.y1;
	float src_offset_x, src_offset_y;
	float src_scale_x, src_scale_y;
	int nbox, pix_xoff, pix_yoff;
	const BoxRec *box;

	DBG(("%s: %dx%d -> %dx%d\n", __FUNCTION__,
	     src_width, src_height, dst_width, dst_height));

	assert(priv->gpu_bo);
	memset(&tmp, 0, sizeof(tmp));

	tmp.op = PictOpSrc;
	tmp.dst.pixmap = pixmap;
	tmp.dst.width  = pixmap->drawable.width;
	tmp.dst.height = pixmap->drawable.height;
	tmp.dst.format = sna_format_for_depth(pixmap->drawable.depth);
	tmp.dst.bo = priv->gpu_bo;

	/* Only filter when actually scaling */
	if (src_width == dst_width && src_height == dst_height)
		tmp.src.filter = SAMPLER_FILTER_NEAREST;
	else
		tmp.src.filter = SAMPLER_FILTER_BILINEAR;
	tmp.src.repeat = SAMPLER_EXTEND_PAD;
	tmp.src.bo = frame->bo;
	tmp.mask.bo = NULL;
	tmp.u.gen5.wm_kernel =
		is_planar_fourcc(frame->id) ? WM_KERNEL_VIDEO_PLANAR : WM_KERNEL_VIDEO_PACKED;
	tmp.u.gen5.ve_id = 2;	/* x,y + single (u,v) channel */
	tmp.is_affine = true;
	tmp.floats_per_vertex = 3;
	tmp.floats_per_rect = 9;
	tmp.priv = frame;

	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) {
		kgem_submit(&sna->kgem);
		if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL))
			return false;
	}

	gen5_align_vertex(sna, &tmp);
	gen5_video_bind_surfaces(sna, &tmp);

	/* Set up the offset for translating from the given region (in screen
	 * coordinates) to the backing pixmap.
	 */
#ifdef COMPOSITE
	pix_xoff = -pixmap->screen_x + pixmap->drawable.x;
	pix_yoff = -pixmap->screen_y + pixmap->drawable.y;
#else
	pix_xoff = 0;
	pix_yoff = 0;
#endif

	/* Map destination pixels to normalized [0,1] texture coordinates */
	src_scale_x = (float)src_width / dst_width / frame->width;
	src_offset_x = (float)frame->src.x1 / frame->width - dstRegion->extents.x1 * src_scale_x;

	src_scale_y = (float)src_height / dst_height / frame->height;
	src_offset_y = (float)frame->src.y1 / frame->height - dstRegion->extents.y1 * src_scale_y;

	box = region_rects(dstRegion);
	nbox = region_num_rects(dstRegion);
	while (nbox--) {
		BoxRec r;

		r.x1 = box->x1 + pix_xoff;
		r.x2 = box->x2 + pix_xoff;
		r.y1 = box->y1 + pix_yoff;
		r.y2 = box->y2 + pix_yoff;

		gen5_get_rectangles(sna, &tmp, 1, gen5_video_bind_surfaces);

		/* Rectangles are emitted as three vertices:
		 * bottom-right, bottom-left, top-left.
		 */
		OUT_VERTEX(r.x2, r.y2);
		OUT_VERTEX_F(box->x2 * src_scale_x + src_offset_x);
		OUT_VERTEX_F(box->y2 * src_scale_y + src_offset_y);

		OUT_VERTEX(r.x1, r.y2);
		OUT_VERTEX_F(box->x1 * src_scale_x + src_offset_x);
		OUT_VERTEX_F(box->y2 * src_scale_y + src_offset_y);

		OUT_VERTEX(r.x1, r.y1);
		OUT_VERTEX_F(box->x1 * src_scale_x + src_offset_x);
		OUT_VERTEX_F(box->y1 * src_scale_y + src_offset_y);

		if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
			sna_damage_add_box(&priv->gpu_damage, &r);
			sna_damage_subtract_box(&priv->cpu_damage, &r);
		}
		box++;
	}

	gen4_vertex_flush(sna);
	return true;
}
1449
/* Prepare a composite channel from a Picture, falling back to uploads,
 * fixups or extraction as needed.  Returns -1 on failure, 0 when the
 * caller should substitute a clear/solid channel, and 1 on success
 * (see the switch in gen5_render_composite()).
 */
static int
gen5_composite_picture(struct sna *sna,
		       PicturePtr picture,
		       struct sna_composite_channel *channel,
		       int x, int y,
		       int w, int h,
		       int dst_x, int dst_y,
		       bool precise)
{
	PixmapPtr pixmap;
	uint32_t color;
	int16_t dx, dy;

	DBG(("%s: (%d, %d)x(%d, %d), dst=(%d, %d)\n",
	     __FUNCTION__, x, y, w, h, dst_x, dst_y));

	channel->is_solid = false;
	channel->card_format = -1;

	/* Solid colours bypass surface setup entirely */
	if (sna_picture_is_solid(picture, &color))
		return gen4_channel_init_solid(sna, channel, color);

	if (picture->pDrawable == NULL) {
		int ret;

		/* Source-only pictures: linear gradients are handled
		 * natively, everything else is rasterized to a temporary.
		 */
		if (picture->pSourcePict->type == SourcePictTypeLinear)
			return gen4_channel_init_linear(sna, picture, channel,
							x, y,
							w, h,
							dst_x, dst_y);

		DBG(("%s -- fixup, gradient\n", __FUNCTION__));
		ret = -1;
		if (!precise)
			ret = sna_render_picture_approximate_gradient(sna, picture, channel,
								      x, y, w, h, dst_x, dst_y);
		if (ret == -1)
			ret = sna_render_picture_fixup(sna, picture, channel,
						       x, y, w, h, dst_x, dst_y);
		return ret;
	}

	if (picture->alphaMap) {
		DBG(("%s -- fallback, alphamap\n", __FUNCTION__));
		return sna_render_picture_fixup(sna, picture, channel,
						x, y, w, h, dst_x, dst_y);
	}

	/* Repeat/filter modes the sampler cannot express get rasterized */
	if (!gen5_check_repeat(picture))
		return sna_render_picture_fixup(sna, picture, channel,
						x, y, w, h, dst_x, dst_y);

	if (!gen5_check_filter(picture))
		return sna_render_picture_fixup(sna, picture, channel,
						x, y, w, h, dst_x, dst_y);

	channel->repeat = picture->repeat ? picture->repeatType : RepeatNone;
	channel->filter = picture->filter;

	pixmap = get_drawable_pixmap(picture->pDrawable);
	get_drawable_deltas(picture->pDrawable, pixmap, &dx, &dy);

	x += dx + picture->pDrawable->x;
	y += dy + picture->pDrawable->y;

	channel->is_affine = sna_transform_is_affine(picture->transform);
	if (sna_transform_is_imprecise_integer_translation(picture->transform, picture->filter, precise, &dx, &dy)) {
		/* A pure translation can be folded into the sample offset,
		 * dropping the transform (and any filtering).
		 */
		DBG(("%s: integer translation (%d, %d), removing\n",
		     __FUNCTION__, dx, dy));
		x += dx;
		y += dy;
		channel->transform = NULL;
		channel->filter = PictFilterNearest;

		if (channel->repeat ||
		    (x >= 0 &&
		     y >= 0 &&
		     x + w < pixmap->drawable.width &&
		     y + h < pixmap->drawable.height)) {
			struct sna_pixmap *priv = sna_pixmap(pixmap);
			if (priv && priv->clear) {
				DBG(("%s: converting large pixmap source into solid [%08x]\n", __FUNCTION__, priv->clear_color));
				return gen4_channel_init_solid(sna, channel, priv->clear_color);
			}
		}
	} else
		channel->transform = picture->transform;

	channel->pict_format = picture->format;
	channel->card_format = gen5_get_card_format(picture->format);
	if (channel->card_format == -1)
		return sna_render_picture_convert(sna, picture, channel, pixmap,
						  x, y, w, h, dst_x, dst_y,
						  false);

	/* Pixmaps beyond the hardware surface limits need extraction */
	if (too_large(pixmap->drawable.width, pixmap->drawable.height))
		return sna_render_picture_extract(sna, picture, channel,
						  x, y, w, h, dst_x, dst_y);

	DBG(("%s: pixmap, repeat=%d, filter=%d, transform?=%d [affine? %d], format=%08x\n",
	     __FUNCTION__,
	     channel->repeat, channel->filter,
	     channel->transform != NULL, channel->is_affine,
	     channel->pict_format));
	if (channel->transform) {
		DBG(("%s: transform=[%f %f %f, %f %f %f, %f %f %f]\n",
		     __FUNCTION__,
		     channel->transform->matrix[0][0] / 65536.,
		     channel->transform->matrix[0][1] / 65536.,
		     channel->transform->matrix[0][2] / 65536.,
		     channel->transform->matrix[1][0] / 65536.,
		     channel->transform->matrix[1][1] / 65536.,
		     channel->transform->matrix[1][2] / 65536.,
		     channel->transform->matrix[2][0] / 65536.,
		     channel->transform->matrix[2][1] / 65536.,
		     channel->transform->matrix[2][2] / 65536.));
	}

	return sna_render_pixmap_bo(sna, channel, pixmap,
				    x, y, w, h, dst_x, dst_y);
}
1571
1572static void gen5_composite_channel_convert(struct sna_composite_channel *channel)
1573{
1574	channel->repeat = gen5_repeat(channel->repeat);
1575	channel->filter = gen5_filter(channel->filter);
1576	if (channel->card_format == (unsigned)-1)
1577		channel->card_format = gen5_get_card_format(channel->pict_format);
1578}
1579
/* Finish a composite operation: flush any pending vertices (applying
 * the second pass needed for component-alpha), then release the
 * channel references taken during setup.
 */
static void
gen5_render_composite_done(struct sna *sna,
			   const struct sna_composite_op *op)
{
	if (sna->render.vertex_offset) {
		gen4_vertex_flush(sna);
		gen5_magic_ca_pass(sna,op);
	}

	DBG(("%s()\n", __FUNCTION__));

	if (op->mask.bo)
		kgem_bo_destroy(&sna->kgem, op->mask.bo);
	if (op->src.bo)
		kgem_bo_destroy(&sna->kgem, op->src.bo);

	sna_render_composite_redirect_done(sna, op);
}
1598
/* Select and set up the destination bo for a composite, redirecting
 * through a temporary if the target exceeds the hardware limits.
 * Returns false if no usable GPU destination is available.
 */
static bool
gen5_composite_set_target(struct sna *sna,
			  struct sna_composite_op *op,
			  PicturePtr dst,
			  int x, int y, int w, int h,
			  bool partial)
{
	BoxRec box;
	unsigned hint;

	op->dst.pixmap = get_drawable_pixmap(dst->pDrawable);
	op->dst.width  = op->dst.pixmap->drawable.width;
	op->dst.height = op->dst.pixmap->drawable.height;
	op->dst.format = dst->format;
	if (w && h) {
		box.x1 = x;
		box.y1 = y;
		box.x2 = x + w;
		box.y2 = y + h;
	} else
		sna_render_picture_extents(dst, &box);

	hint = PREFER_GPU | FORCE_GPU | RENDER_GPU;
	if (!partial) {
		hint |= IGNORE_DAMAGE;
		/* A full-surface overwrite replaces all existing contents */
		if (w == op->dst.width && h == op->dst.height)
			hint |= REPLACES;
	}

	op->dst.bo = sna_drawable_use_bo(dst->pDrawable, hint, &box, &op->damage);
	if (op->dst.bo == NULL)
		return false;

	if (hint & REPLACES) {
		/* Discard any queued work for the soon-to-be-replaced bo */
		struct sna_pixmap *priv = sna_pixmap(op->dst.pixmap);
		kgem_bo_pair_undo(&sna->kgem, priv->gpu_bo, priv->cpu_bo);
	}

	get_drawable_deltas(dst->pDrawable, op->dst.pixmap,
			    &op->dst.x, &op->dst.y);

	DBG(("%s: pixmap=%ld, format=%08x, size=%dx%d, pitch=%d, delta=(%d,%d),damage=%p\n",
	     __FUNCTION__,
	     op->dst.pixmap->drawable.serialNumber, (int)op->dst.format,
	     op->dst.width, op->dst.height,
	     op->dst.bo->pitch,
	     op->dst.x, op->dst.y,
	     op->damage ? *op->damage : (void *)-1));

	assert(op->dst.bo->proxy == NULL);

	if (too_large(op->dst.width, op->dst.height) &&
	    !sna_render_composite_redirect(sna, op, x, y, w, h, partial))
		return false;

	return true;
}
1656
1657static bool
1658is_gradient(PicturePtr picture, bool precise)
1659{
1660	if (picture->pDrawable)
1661		return false;
1662
1663	switch (picture->pSourcePict->type) {
1664	case SourcePictTypeSolidFill:
1665	case SourcePictTypeLinear:
1666		return false;
1667	default:
1668		return precise;
1669	}
1670}
1671
/* Does the picture carry a separate alpha map? (forces a fixup path) */
static bool
has_alphamap(PicturePtr p)
{
	return p->alphaMap != NULL;
}
1677
/* Would using this picture as a source require uploading it to the GPU? */
static bool
need_upload(struct sna *sna, PicturePtr p)
{
	return p->pDrawable && untransformed(p) &&
		!is_gpu(sna, p->pDrawable, PREFER_GPU_RENDER);
}
1684
1685static bool
1686source_is_busy(PixmapPtr pixmap)
1687{
1688	struct sna_pixmap *priv = sna_pixmap(pixmap);
1689	if (priv == NULL)
1690		return false;
1691
1692	if (priv->clear)
1693		return false;
1694
1695	if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
1696		return true;
1697
1698	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
1699		return true;
1700
1701	return priv->gpu_damage && !priv->cpu_damage;
1702}
1703
1704static bool
1705source_fallback(struct sna *sna, PicturePtr p, PixmapPtr pixmap, bool precise)
1706{
1707	if (sna_picture_is_solid(p, NULL))
1708		return false;
1709
1710	if (is_gradient(p, precise) ||
1711	    !gen5_check_repeat(p) ||
1712	    !gen5_check_format(p->format))
1713		return true;
1714
1715	if (pixmap && source_is_busy(pixmap))
1716		return false;
1717
1718	return has_alphamap(p) || !gen5_check_filter(p) || need_upload(sna, p);
1719}
1720
/* Decide whether a composite should fall back to the CPU, weighing
 * where the destination, source and mask currently live.
 */
static bool
gen5_composite_fallback(struct sna *sna,
			PicturePtr src,
			PicturePtr mask,
			PicturePtr dst)
{
	PixmapPtr src_pixmap;
	PixmapPtr mask_pixmap;
	PixmapPtr dst_pixmap;
	bool src_fallback, mask_fallback;

	if (!gen5_check_dst_format(dst->format)) {
		DBG(("%s: unknown destination format: %d\n",
		     __FUNCTION__, dst->format));
		return true;
	}

	dst_pixmap = get_drawable_pixmap(dst->pDrawable);

	src_pixmap = src->pDrawable ? get_drawable_pixmap(src->pDrawable) : NULL;
	src_fallback = source_fallback(sna, src, src_pixmap,
				       dst->polyMode == PolyModePrecise);

	if (mask) {
		mask_pixmap = mask->pDrawable ? get_drawable_pixmap(mask->pDrawable) : NULL;
		mask_fallback = source_fallback(sna, mask, mask_pixmap,
						dst->polyMode == PolyModePrecise);
	} else {
		mask_pixmap = NULL;
		mask_fallback = false;
	}

	/* If we are using the destination as a source and need to
	 * readback in order to upload the source, do it all
	 * on the cpu.
	 */
	if (src_pixmap == dst_pixmap && src_fallback) {
		DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
		return true;
	}
	if (mask_pixmap == dst_pixmap && mask_fallback) {
		DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
		return true;
	}

	/* If anything is on the GPU, push everything out to the GPU */
	if (dst_use_gpu(dst_pixmap)) {
		DBG(("%s: dst is already on the GPU, try to use GPU\n",
		     __FUNCTION__));
		return false;
	}

	if (src_pixmap && !src_fallback) {
		DBG(("%s: src is already on the GPU, try to use GPU\n",
		     __FUNCTION__));
		return false;
	}
	if (mask_pixmap && !mask_fallback) {
		DBG(("%s: mask is already on the GPU, try to use GPU\n",
		     __FUNCTION__));
		return false;
	}

	/* However if the dst is not on the GPU and we need to
	 * render one of the sources using the CPU, we may
	 * as well do the entire operation in place on the CPU.
	 */
	if (src_fallback) {
		DBG(("%s: dst is on the CPU and src will fallback\n",
		     __FUNCTION__));
		return true;
	}

	if (mask_fallback) {
		DBG(("%s: dst is on the CPU and mask will fallback\n",
		     __FUNCTION__));
		return true;
	}

	if (too_large(dst_pixmap->drawable.width,
		      dst_pixmap->drawable.height) &&
	    dst_is_cpu(dst_pixmap)) {
		DBG(("%s: dst is on the CPU and too large\n", __FUNCTION__));
		return true;
	}

	DBG(("%s: dst is not on the GPU and the operation should not fallback\n",
	     __FUNCTION__));
	return dst_use_cpu(dst_pixmap);
}
1811
/* Try to initialize the mask channel by reusing the already-prepared
 * source channel when both sample the same drawable identically.
 * Returns true (with a new reference on mc->bo) on success, false if
 * the mask must be prepared independently.
 */
static int
reuse_source(struct sna *sna,
	     PicturePtr src, struct sna_composite_channel *sc, int src_x, int src_y,
	     PicturePtr mask, struct sna_composite_channel *mc, int msk_x, int msk_y)
{
	uint32_t color;

	if (src_x != msk_x || src_y != msk_y)
		return false;

	if (src == mask) {
		DBG(("%s: mask is source\n", __FUNCTION__));
		*mc = *sc;
		mc->bo = kgem_bo_reference(mc->bo);
		return true;
	}

	if (sna_picture_is_solid(mask, &color))
		return gen4_channel_init_solid(sna, mc, color);

	if (sc->is_solid)
		return false;

	if (src->pDrawable == NULL || mask->pDrawable != src->pDrawable)
		return false;

	DBG(("%s: mask reuses source drawable\n", __FUNCTION__));

	/* Same drawable: the channels can be shared only if every
	 * sampling parameter also matches or is expressible on gen5.
	 */
	if (!sna_transform_equal(src->transform, mask->transform))
		return false;

	if (!sna_picture_alphamap_equal(src, mask))
		return false;

	if (!gen5_check_repeat(mask))
		return false;

	if (!gen5_check_filter(mask))
		return false;

	if (!gen5_check_format(mask->format))
		return false;

	DBG(("%s: reusing source channel for mask with a twist\n",
	     __FUNCTION__));

	*mc = *sc;
	mc->repeat = gen5_repeat(mask->repeat ? mask->repeatType : RepeatNone);
	mc->filter = gen5_filter(mask->filter);
	mc->pict_format = mask->format;
	mc->card_format = gen5_get_card_format(mask->format);
	mc->bo = kgem_bo_reference(mc->bo);
	return true;
}
1866
/* Entry point for accelerated Render composites.  Validates the
 * operation, prepares destination/source/mask channels, selects the WM
 * kernel and emitters, and binds the initial state.  On any setup
 * failure the partially-acquired resources are unwound via the goto
 * cleanup chain and a BLT fallback is attempted.
 */
static bool
gen5_render_composite(struct sna *sna,
		      uint8_t op,
		      PicturePtr src,
		      PicturePtr mask,
		      PicturePtr dst,
		      int16_t src_x, int16_t src_y,
		      int16_t msk_x, int16_t msk_y,
		      int16_t dst_x, int16_t dst_y,
		      int16_t width, int16_t height,
		      unsigned flags,
		      struct sna_composite_op *tmp)
{
	DBG(("%s: %dx%d, current mode=%d\n", __FUNCTION__,
	     width, height, sna->kgem.mode));

	if (op >= ARRAY_SIZE(gen5_blend_op)) {
		DBG(("%s: unhandled blend op %d\n", __FUNCTION__, op));
		return false;
	}

	/* Prefer the BLT engine for simple unmasked operations */
	if (mask == NULL &&
	    sna_blt_composite(sna, op,
			      src, dst,
			      src_x, src_y,
			      dst_x, dst_y,
			      width, height,
			      flags, tmp))
		return true;

	if (gen5_composite_fallback(sna, src, mask, dst))
		goto fallback;

	if (need_tiling(sna, width, height))
		return sna_tiling_composite(op, src, mask, dst,
					    src_x, src_y,
					    msk_x, msk_y,
					    dst_x, dst_y,
					    width, height,
					    tmp);

	if (!gen5_composite_set_target(sna, tmp, dst,
				       dst_x, dst_y, width, height,
				       flags & COMPOSITE_PARTIAL || op > PictOpSrc)) {
		DBG(("%s: failed to set composite target\n", __FUNCTION__));
		goto fallback;
	}

	DBG(("%s: preparing source\n", __FUNCTION__));
	tmp->op = op;
	switch (gen5_composite_picture(sna, src, &tmp->src,
				       src_x, src_y,
				       width, height,
				       dst_x, dst_y,
				       dst->polyMode == PolyModePrecise)) {
	case -1:
		DBG(("%s: failed to prepare source picture\n", __FUNCTION__));
		goto cleanup_dst;
	case 0:
		/* Source reduced to nothing: substitute transparent black */
		if (!gen4_channel_init_solid(sna, &tmp->src, 0))
			goto cleanup_dst;
		/* fall through to fixup */
	case 1:
		if (mask == NULL &&
		    sna_blt_composite__convert(sna,
					       dst_x, dst_y, width, height,
					       tmp))
			return true;

		gen5_composite_channel_convert(&tmp->src);
		break;
	}

	tmp->is_affine = tmp->src.is_affine;
	tmp->has_component_alpha = false;
	tmp->need_magic_ca_pass = false;

	if (mask) {
		if (mask->componentAlpha && PICT_FORMAT_RGB(mask->format)) {
			tmp->has_component_alpha = true;

			/* Check if it's component alpha that relies on a source alpha and on
			 * the source value.  We can only get one of those into the single
			 * source value that we get to blend with.
			 */
			if (gen5_blend_op[op].src_alpha &&
			    (gen5_blend_op[op].src_blend != GEN5_BLENDFACTOR_ZERO)) {
				if (op != PictOpOver) {
					DBG(("%s: unhandled CA blend op %d\n", __FUNCTION__, op));
					goto cleanup_src;
				}

				/* Split into two passes: OutReverse now,
				 * then the "magic" Add pass at flush time.
				 */
				tmp->need_magic_ca_pass = true;
				tmp->op = PictOpOutReverse;
			}
		}

		if (!reuse_source(sna,
				  src, &tmp->src, src_x, src_y,
				  mask, &tmp->mask, msk_x, msk_y)) {
			DBG(("%s: preparing mask\n", __FUNCTION__));
			switch (gen5_composite_picture(sna, mask, &tmp->mask,
						       msk_x, msk_y,
						       width, height,
						       dst_x, dst_y,
						       dst->polyMode == PolyModePrecise)) {
			case -1:
				DBG(("%s: failed to prepare mask picture\n", __FUNCTION__));
				goto cleanup_src;
			case 0:
				if (!gen4_channel_init_solid(sna, &tmp->mask, 0))
					goto cleanup_src;
				/* fall through to fixup */
			case 1:
				gen5_composite_channel_convert(&tmp->mask);
				break;
			}
		}

		tmp->is_affine &= tmp->mask.is_affine;
	}

	tmp->u.gen5.wm_kernel =
		gen5_choose_composite_kernel(tmp->op,
					     tmp->mask.bo != NULL,
					     tmp->has_component_alpha,
					     tmp->is_affine);
	tmp->u.gen5.ve_id = gen4_choose_composite_emitter(sna, tmp);

	tmp->blt   = gen5_render_composite_blt;
	tmp->box   = gen5_render_composite_box;
	tmp->boxes = gen5_render_composite_boxes__blt;
	if (tmp->emit_boxes) {
		tmp->boxes = gen5_render_composite_boxes;
		tmp->thread_boxes = gen5_render_composite_boxes__thread;
	}
	tmp->done  = gen5_render_composite_done;

	if (!kgem_check_bo(&sna->kgem,
			   tmp->dst.bo, tmp->src.bo, tmp->mask.bo, NULL)) {
		kgem_submit(&sna->kgem);
		if (!kgem_check_bo(&sna->kgem,
				   tmp->dst.bo, tmp->src.bo, tmp->mask.bo, NULL))
			goto cleanup_mask;
	}

	gen5_align_vertex(sna, tmp);
	gen5_bind_surfaces(sna, tmp);
	return true;

cleanup_mask:
	if (tmp->mask.bo) {
		kgem_bo_destroy(&sna->kgem, tmp->mask.bo);
		tmp->mask.bo = NULL;
	}
cleanup_src:
	if (tmp->src.bo) {
		kgem_bo_destroy(&sna->kgem, tmp->src.bo);
		tmp->src.bo = NULL;
	}
cleanup_dst:
	if (tmp->redirect.real_bo) {
		kgem_bo_destroy(&sna->kgem, tmp->dst.bo);
		tmp->redirect.real_bo = NULL;
	}
fallback:
	return (mask == NULL &&
		sna_blt_composite(sna, op,
				  src, dst,
				  src_x, src_y,
				  dst_x, dst_y,
				  width, height,
				  flags | COMPOSITE_FALLBACK, tmp));
}
2041
2042#if !NO_COMPOSITE_SPANS
fastcall static void
gen5_render_composite_spans_box(struct sna *sna,
				const struct sna_composite_spans_op *op,
				const BoxRec *box, float opacity)
{
	/* Emit a single opacity-weighted rectangle for a spans composite:
	 * reserve batch/vertex space for one rectangle (rebinding surface
	 * state if a new batch had to be started), then let the op's
	 * specialised vertex emitter write the vertices.
	 */
	DBG(("%s: src=+(%d, %d), opacity=%f, dst=+(%d, %d), box=(%d, %d) x (%d, %d)\n",
	     __FUNCTION__,
	     op->base.src.offset[0], op->base.src.offset[1],
	     opacity,
	     op->base.dst.x, op->base.dst.y,
	     box->x1, box->y1,
	     box->x2 - box->x1,
	     box->y2 - box->y1));

	gen5_get_rectangles(sna, &op->base, 1, gen5_bind_surfaces);
	op->prim_emit(sna, op, box, opacity);
}
2060
static void
gen5_render_composite_spans_boxes(struct sna *sna,
				  const struct sna_composite_spans_op *op,
				  const BoxRec *box, int nbox,
				  float opacity)
{
	/* Emit a run of rectangles that all share a single opacity value.
	 * gen5_get_rectangles() may grant fewer rectangles than requested
	 * (limited by remaining batch/vertex space), so keep looping until
	 * every box has been emitted.
	 * NOTE(review): the do/while structure assumes nbox >= 1 on entry.
	 */
	DBG(("%s: nbox=%d, src=+(%d, %d), opacity=%f, dst=+(%d, %d)\n",
	     __FUNCTION__, nbox,
	     op->base.src.offset[0], op->base.src.offset[1],
	     opacity,
	     op->base.dst.x, op->base.dst.y));

	do {
		int nbox_this_time;

		nbox_this_time = gen5_get_rectangles(sna, &op->base, nbox,
						     gen5_bind_surfaces);
		nbox -= nbox_this_time;

		do {
			DBG(("  %s: (%d, %d) x (%d, %d)\n", __FUNCTION__,
			     box->x1, box->y1,
			     box->x2 - box->x1,
			     box->y2 - box->y1));

			op->prim_emit(sna, op, box++, opacity);
		} while (--nbox_this_time);
	} while (nbox);
}
2090
fastcall static void
gen5_render_composite_spans_boxes__thread(struct sna *sna,
					  const struct sna_composite_spans_op *op,
					  const struct sna_opacity_box *box,
					  int nbox)
{
	/* Threaded variant: each box carries its own opacity value
	 * (struct sna_opacity_box).  Vertex space is claimed while holding
	 * the render vertex lock, then the lock is dropped so this thread
	 * can fill its reserved vertex range while other threads claim
	 * their own ranges concurrently.
	 */
	DBG(("%s: nbox=%d, src=+(%d, %d), dst=+(%d, %d)\n",
	     __FUNCTION__, nbox,
	     op->base.src.offset[0], op->base.src.offset[1],
	     op->base.dst.x, op->base.dst.y));

	sna_vertex_lock(&sna->render);
	do {
		int nbox_this_time;
		float *v;

		nbox_this_time = gen5_get_rectangles(sna, &op->base, nbox,
						     gen5_bind_surfaces);
		assert(nbox_this_time);
		nbox -= nbox_this_time;

		/* Reserve the vertex range for this batch of boxes. */
		v = sna->render.vertices + sna->render.vertex_used;
		sna->render.vertex_used += nbox_this_time * op->base.floats_per_rect;

		/* Mark the reservation active, then emit outside the lock. */
		sna_vertex_acquire__locked(&sna->render);
		sna_vertex_unlock(&sna->render);

		op->emit_boxes(op, box, nbox_this_time, v);
		box += nbox_this_time;

		sna_vertex_lock(&sna->render);
		sna_vertex_release__locked(&sna->render);
	} while (nbox);
	sna_vertex_unlock(&sna->render);
}
2126
fastcall static void
gen5_render_composite_spans_done(struct sna *sna,
				 const struct sna_composite_spans_op *op)
{
	/* Finish a spans composite: flush any vertices still pending in
	 * the open primitive, drop the reference on the source bo taken
	 * during setup, and resolve any destination redirection.
	 */
	if (sna->render.vertex_offset)
		gen4_vertex_flush(sna);

	DBG(("%s()\n", __FUNCTION__));

	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
	sna_render_composite_redirect_done(sna, &op->base);
}
2139
static bool
gen5_check_composite_spans(struct sna *sna,
			   uint8_t op, PicturePtr src, PicturePtr dst,
			   int16_t width, int16_t height,
			   unsigned flags)
{
	/* Decide whether the RENDER spans path may handle this operation.
	 * Rejects blend ops outside the gen5_blend_op[] table, operations
	 * that gen5_composite_fallback() says would fall back anyway, and
	 * over-sized (tiled) operations whose destination is not already
	 * on the GPU.
	 */
	DBG(("%s: op=%d, width=%d, height=%d, flags=%x\n",
	     __FUNCTION__, op, width, height, flags));

	if (op >= ARRAY_SIZE(gen5_blend_op))
		return false;

	if (gen5_composite_fallback(sna, src, NULL, dst)) {
		DBG(("%s: operation would fallback\n", __FUNCTION__));
		return false;
	}

	if (need_tiling(sna, width, height) &&
	    !is_gpu(sna, dst->pDrawable, PREFER_GPU_SPANS)) {
		DBG(("%s: fallback, tiled operation not on GPU\n",
		     __FUNCTION__));
		return false;
	}

	/* For non-rectilinear spans, only prefer the GPU when the
	 * destination is already busy on the GPU (CPU rasterisation is
	 * presumably cheaper otherwise — see the INPLACE/PREFER_GPU
	 * heuristics below).
	 */
	if ((flags & COMPOSITE_SPANS_RECTILINEAR) == 0) {
		struct sna_pixmap *priv = sna_pixmap_from_drawable(dst->pDrawable);
		assert(priv);

		if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
			return true;

		if (flags & COMPOSITE_SPANS_INPLACE_HINT)
			return false;

		if ((sna->render.prefer_gpu & PREFER_GPU_SPANS) == 0 &&
		    dst->format == PICT_a8)
			return false;

		return priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo);
	}

	return true;
}
2183
static bool
gen5_render_composite_spans(struct sna *sna,
			    uint8_t op,
			    PicturePtr src,
			    PicturePtr dst,
			    int16_t src_x,  int16_t src_y,
			    int16_t dst_x,  int16_t dst_y,
			    int16_t width,  int16_t height,
			    unsigned flags,
			    struct sna_composite_spans_op *tmp)
{
	/* Prepare a composite-spans operation (a composite whose coverage
	 * is modulated per-span by an opacity value).  On success, tmp is
	 * populated with the emitters and GPU state and surfaces are
	 * bound.  Callers are expected to have passed
	 * gen5_check_composite_spans() first (asserted below).
	 */
	DBG(("%s: %dx%d with flags=%x, current mode=%d\n", __FUNCTION__,
	     width, height, flags, sna->kgem.ring));

	assert(gen5_check_composite_spans(sna, op, src, dst, width, height, flags));

	/* Operations too large for a single render target are split up. */
	if (need_tiling(sna, width, height)) {
		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
		     __FUNCTION__, width, height));
		return sna_tiling_composite_spans(op, src, dst,
						  src_x, src_y, dst_x, dst_y,
						  width, height, flags, tmp);
	}

	tmp->base.op = op;
	if (!gen5_composite_set_target(sna, &tmp->base, dst,
				       dst_x, dst_y, width, height,
				       true))
		return false;

	/* Prepare the source channel: -1 = failure, 0 = substitute a
	 * solid (clear) channel, 1 = ready; 0 and 1 both need conversion.
	 */
	switch (gen5_composite_picture(sna, src, &tmp->base.src,
				       src_x, src_y,
				       width, height,
				       dst_x, dst_y,
				       dst->polyMode == PolyModePrecise)) {
	case -1:
		goto cleanup_dst;
	case 0:
		if (!gen4_channel_init_solid(sna, &tmp->base.src, 0))
			goto cleanup_dst;
		/* fall through to fixup */
	case 1:
		gen5_composite_channel_convert(&tmp->base.src);
		break;
	}

	tmp->base.mask.bo = NULL;

	tmp->base.is_affine = tmp->base.src.is_affine;
	tmp->base.has_component_alpha = false;
	tmp->base.need_magic_ca_pass = false;

	/* Select vertex emitter and the opacity-aware WM kernel (the low
	 * bit of the kernel id selects the projective variant).
	 */
	tmp->base.u.gen5.ve_id = gen4_choose_spans_emitter(sna, tmp);
	tmp->base.u.gen5.wm_kernel = WM_KERNEL_OPACITY | !tmp->base.is_affine;

	tmp->box   = gen5_render_composite_spans_box;
	tmp->boxes = gen5_render_composite_spans_boxes;
	if (tmp->emit_boxes)
		tmp->thread_boxes = gen5_render_composite_spans_boxes__thread;
	tmp->done  = gen5_render_composite_spans_done;

	/* Make sure both bo fit in the aperture, flushing once if not. */
	if (!kgem_check_bo(&sna->kgem,
			   tmp->base.dst.bo, tmp->base.src.bo,
			   NULL))  {
		kgem_submit(&sna->kgem);
		if (!kgem_check_bo(&sna->kgem,
				   tmp->base.dst.bo, tmp->base.src.bo,
				   NULL))
			goto cleanup_src;
	}

	gen5_align_vertex(sna, &tmp->base);
	gen5_bind_surfaces(sna, &tmp->base);
	return true;

cleanup_src:
	if (tmp->base.src.bo)
		kgem_bo_destroy(&sna->kgem, tmp->base.src.bo);
cleanup_dst:
	if (tmp->base.redirect.real_bo)
		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
	return false;
}
2267#endif
2268
static void
gen5_copy_bind_surfaces(struct sna *sna,
			const struct sna_composite_op *op)
{
	/* Build the two-entry binding table for a copy (dst as render
	 * target, src as texture) and emit the state required before the
	 * next rectangle.  If the freshly written table is identical to
	 * the previous one, rewind the surface allocation and reuse the
	 * old table to keep the batch small.
	 */
	bool dirty = kgem_bo_is_dirty(op->dst.bo);
	uint32_t *binding_table;
	uint16_t offset;

	gen5_get_batch(sna, op);

	binding_table = gen5_composite_get_binding_table(sna, &offset);

	binding_table[0] =
		gen5_bind_bo(sna,
			     op->dst.bo, op->dst.width, op->dst.height,
			     gen5_get_dest_format(op->dst.format),
			     true);
	binding_table[1] =
		gen5_bind_bo(sna,
			     op->src.bo, op->src.width, op->src.height,
			     op->src.card_format,
			     false);

	/* Both surface-state offsets fit in a uint64_t; compare them in
	 * one go against the previously emitted table.
	 */
	if (sna->kgem.surface == offset &&
	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen5.surface_table) == *(uint64_t*)binding_table) {
		sna->kgem.surface += sizeof(struct gen5_surface_state_padded) / sizeof(uint32_t);
		offset = sna->render_state.gen5.surface_table;
	}

	/* The low bit of the offset carries the dst-dirty flag. */
	gen5_emit_state(sna, op, offset | dirty);
}
2300
2301static bool
2302gen5_render_copy_boxes(struct sna *sna, uint8_t alu,
2303		       const DrawableRec *src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
2304		       const DrawableRec *dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
2305		       const BoxRec *box, int n, unsigned flags)
2306{
2307	struct sna_composite_op tmp;
2308
2309	DBG(("%s alu=%d, src=%ld:handle=%d, dst=%ld:handle=%d boxes=%d x [((%d, %d), (%d, %d))...], flags=%x\n",
2310	     __FUNCTION__, alu,
2311	     src->serialNumber, src_bo->handle,
2312	     dst->serialNumber, dst_bo->handle,
2313	     n, box->x1, box->y1, box->x2, box->y2,
2314	     flags));
2315
2316	if (sna_blt_compare_depth(src, dst) &&
2317	    sna_blt_copy_boxes(sna, alu,
2318			       src_bo, src_dx, src_dy,
2319			       dst_bo, dst_dx, dst_dy,
2320			       dst->bitsPerPixel,
2321			       box, n))
2322		return true;
2323
2324	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo) {
2325fallback_blt:
2326		if (!sna_blt_compare_depth(src, dst))
2327			return false;
2328
2329		return sna_blt_copy_boxes_fallback(sna, alu,
2330						   src, src_bo, src_dx, src_dy,
2331						   dst, dst_bo, dst_dx, dst_dy,
2332						   box, n);
2333	}
2334
2335	memset(&tmp, 0, sizeof(tmp));
2336
2337	if (dst->depth == src->depth) {
2338		tmp.dst.format = sna_render_format_for_depth(dst->depth);
2339		tmp.src.pict_format = tmp.dst.format;
2340	} else {
2341		tmp.dst.format = sna_format_for_depth(dst->depth);
2342		tmp.src.pict_format = sna_format_for_depth(src->depth);
2343	}
2344	if (!gen5_check_format(tmp.src.pict_format)) {
2345		DBG(("%s: unsupported source format, %x, use BLT\n",
2346		     __FUNCTION__, tmp.src.pict_format));
2347		goto fallback_blt;
2348	}
2349
2350	DBG(("%s (%d, %d)->(%d, %d) x %d\n",
2351	     __FUNCTION__, src_dx, src_dy, dst_dx, dst_dy, n));
2352
2353	tmp.op = alu == GXcopy ? PictOpSrc : PictOpClear;
2354
2355	tmp.dst.pixmap = (PixmapPtr)dst;
2356	tmp.dst.width  = dst->width;
2357	tmp.dst.height = dst->height;
2358	tmp.dst.x = tmp.dst.y = 0;
2359	tmp.dst.bo = dst_bo;
2360	tmp.damage = NULL;
2361
2362	sna_render_composite_redirect_init(&tmp);
2363	if (too_large(tmp.dst.width, tmp.dst.height)) {
2364		BoxRec extents = box[0];
2365		int i;
2366
2367		for (i = 1; i < n; i++) {
2368			if (box[i].x1 < extents.x1)
2369				extents.x1 = box[i].x1;
2370			if (box[i].y1 < extents.y1)
2371				extents.y1 = box[i].y1;
2372
2373			if (box[i].x2 > extents.x2)
2374				extents.x2 = box[i].x2;
2375			if (box[i].y2 > extents.y2)
2376				extents.y2 = box[i].y2;
2377		}
2378		if (!sna_render_composite_redirect(sna, &tmp,
2379						   extents.x1 + dst_dx,
2380						   extents.y1 + dst_dy,
2381						   extents.x2 - extents.x1,
2382						   extents.y2 - extents.y1,
2383						   n > 1))
2384			goto fallback_tiled;
2385	}
2386
2387	tmp.src.filter = SAMPLER_FILTER_NEAREST;
2388	tmp.src.repeat = SAMPLER_EXTEND_NONE;
2389	tmp.src.card_format = gen5_get_card_format(tmp.src.pict_format);
2390	if (too_large(src->width, src->height)) {
2391		BoxRec extents = box[0];
2392		int i;
2393
2394		for (i = 1; i < n; i++) {
2395			if (box[i].x1 < extents.x1)
2396				extents.x1 = box[i].x1;
2397			if (box[i].y1 < extents.y1)
2398				extents.y1 = box[i].y1;
2399
2400			if (box[i].x2 > extents.x2)
2401				extents.x2 = box[i].x2;
2402			if (box[i].y2 > extents.y2)
2403				extents.y2 = box[i].y2;
2404		}
2405
2406		if (!sna_render_pixmap_partial(sna, src, src_bo, &tmp.src,
2407					       extents.x1 + src_dx,
2408					       extents.y1 + src_dy,
2409					       extents.x2 - extents.x1,
2410					       extents.y2 - extents.y1))
2411			goto fallback_tiled_dst;
2412	} else {
2413		tmp.src.bo = kgem_bo_reference(src_bo);
2414		tmp.src.width  = src->width;
2415		tmp.src.height = src->height;
2416		tmp.src.offset[0] = tmp.src.offset[1] = 0;
2417		tmp.src.scale[0] = 1.f/src->width;
2418		tmp.src.scale[1] = 1.f/src->height;
2419	}
2420
2421	tmp.is_affine = true;
2422	tmp.floats_per_vertex = 3;
2423	tmp.floats_per_rect = 9;
2424	tmp.u.gen5.wm_kernel = WM_KERNEL;
2425	tmp.u.gen5.ve_id = 2;
2426
2427	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
2428		kgem_submit(&sna->kgem);
2429		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
2430			DBG(("%s: aperture check failed\n", __FUNCTION__));
2431			kgem_bo_destroy(&sna->kgem, tmp.src.bo);
2432			if (tmp.redirect.real_bo)
2433				kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
2434
2435			goto fallback_blt;
2436		}
2437	}
2438
2439	dst_dx += tmp.dst.x;
2440	dst_dy += tmp.dst.y;
2441	tmp.dst.x = tmp.dst.y = 0;
2442
2443	src_dx += tmp.src.offset[0];
2444	src_dy += tmp.src.offset[1];
2445
2446	gen5_align_vertex(sna, &tmp);
2447	gen5_copy_bind_surfaces(sna, &tmp);
2448
2449	do {
2450		int n_this_time;
2451
2452		n_this_time = gen5_get_rectangles(sna, &tmp, n,
2453						  gen5_copy_bind_surfaces);
2454		n -= n_this_time;
2455
2456		do {
2457			DBG(("	(%d, %d) -> (%d, %d) + (%d, %d)\n",
2458			     box->x1 + src_dx, box->y1 + src_dy,
2459			     box->x1 + dst_dx, box->y1 + dst_dy,
2460			     box->x2 - box->x1, box->y2 - box->y1));
2461			OUT_VERTEX(box->x2 + dst_dx, box->y2 + dst_dy);
2462			OUT_VERTEX_F((box->x2 + src_dx) * tmp.src.scale[0]);
2463			OUT_VERTEX_F((box->y2 + src_dy) * tmp.src.scale[1]);
2464
2465			OUT_VERTEX(box->x1 + dst_dx, box->y2 + dst_dy);
2466			OUT_VERTEX_F((box->x1 + src_dx) * tmp.src.scale[0]);
2467			OUT_VERTEX_F((box->y2 + src_dy) * tmp.src.scale[1]);
2468
2469			OUT_VERTEX(box->x1 + dst_dx, box->y1 + dst_dy);
2470			OUT_VERTEX_F((box->x1 + src_dx) * tmp.src.scale[0]);
2471			OUT_VERTEX_F((box->y1 + src_dy) * tmp.src.scale[1]);
2472
2473			box++;
2474		} while (--n_this_time);
2475	} while (n);
2476
2477	gen4_vertex_flush(sna);
2478	sna_render_composite_redirect_done(sna, &tmp);
2479	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
2480	return true;
2481
2482fallback_tiled_dst:
2483	if (tmp.redirect.real_bo)
2484		kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
2485fallback_tiled:
2486	if (sna_blt_compare_depth(src, dst) &&
2487	    sna_blt_copy_boxes(sna, alu,
2488			       src_bo, src_dx, src_dy,
2489			       dst_bo, dst_dx, dst_dy,
2490			       dst->bitsPerPixel,
2491			       box, n))
2492		return true;
2493
2494	DBG(("%s: tiled fallback\n", __FUNCTION__));
2495	return sna_tiling_copy_boxes(sna, alu,
2496				     src, src_bo, src_dx, src_dy,
2497				     dst, dst_bo, dst_dx, dst_dy,
2498				     box, n);
2499}
2500
static void
gen5_render_copy_blt(struct sna *sna,
		     const struct sna_copy_op *op,
		     int16_t sx, int16_t sy,
		     int16_t w,  int16_t h,
		     int16_t dx, int16_t dy)
{
	/* Emit one copy rectangle: three corners (bottom-right,
	 * bottom-left, top-left) with normalised source coordinates
	 * (texel * scale); the fourth corner is implied by the rectangle
	 * primitive.
	 */
	DBG(("%s: src=(%d, %d), dst=(%d, %d), size=(%d, %d)\n", __FUNCTION__,
	     sx, sy, dx, dy, w, h));

	gen5_get_rectangles(sna, &op->base, 1, gen5_copy_bind_surfaces);

	OUT_VERTEX(dx+w, dy+h);
	OUT_VERTEX_F((sx+w)*op->base.src.scale[0]);
	OUT_VERTEX_F((sy+h)*op->base.src.scale[1]);

	OUT_VERTEX(dx, dy+h);
	OUT_VERTEX_F(sx*op->base.src.scale[0]);
	OUT_VERTEX_F((sy+h)*op->base.src.scale[1]);

	OUT_VERTEX(dx, dy);
	OUT_VERTEX_F(sx*op->base.src.scale[0]);
	OUT_VERTEX_F(sy*op->base.src.scale[1]);
}
2525
static void
gen5_render_copy_done(struct sna *sna,
		      const struct sna_copy_op *op)
{
	/* Finish a copy operation by flushing any vertices still pending
	 * in the open primitive.  No bo references were taken in
	 * gen5_render_copy(), so there is nothing to release here.
	 */
	if (sna->render.vertex_offset)
		gen4_vertex_flush(sna);

	DBG(("%s()\n", __FUNCTION__));
}
2535
static bool
gen5_render_copy(struct sna *sna, uint8_t alu,
		 PixmapPtr src, struct kgem_bo *src_bo,
		 PixmapPtr dst, struct kgem_bo *dst_bo,
		 struct sna_copy_op *op)
{
	/* Prepare a reusable copy operation (op->blt then op->done).
	 * Prefers the BLT engine; falls back to it as well whenever the
	 * render pipeline cannot handle the alu, sizes or formats.
	 */
	DBG(("%s (alu=%d)\n", __FUNCTION__, alu));

	if (sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
	    sna_blt_copy(sna, alu,
			 src_bo, dst_bo,
			 dst->drawable.bitsPerPixel,
			 op))
		return true;

	/* Render path restrictions: GXcopy/GXclear only, distinct bo,
	 * and both surfaces within the pipeline's size limits.
	 */
	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo ||
	    too_large(src->drawable.width, src->drawable.height) ||
	    too_large(dst->drawable.width, dst->drawable.height)) {
fallback:
		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
			return false;

		return sna_blt_copy(sna, alu, src_bo, dst_bo,
				    dst->drawable.bitsPerPixel,
				    op);
	}

	if (dst->drawable.depth == src->drawable.depth) {
		op->base.dst.format = sna_render_format_for_depth(dst->drawable.depth);
		op->base.src.pict_format = op->base.dst.format;
	} else {
		op->base.dst.format = sna_format_for_depth(dst->drawable.depth);
		op->base.src.pict_format = sna_format_for_depth(src->drawable.depth);
	}
	if (!gen5_check_format(op->base.src.pict_format))
		goto fallback;

	op->base.op = alu == GXcopy ? PictOpSrc : PictOpClear;

	op->base.dst.pixmap = dst;
	op->base.dst.width  = dst->drawable.width;
	op->base.dst.height = dst->drawable.height;
	op->base.dst.bo = dst_bo;

	op->base.src.bo = src_bo;
	op->base.src.card_format =
		gen5_get_card_format(op->base.src.pict_format);
	op->base.src.width  = src->drawable.width;
	op->base.src.height = src->drawable.height;
	/* Normalise texture coordinates to [0, 1]. */
	op->base.src.scale[0] = 1.f/src->drawable.width;
	op->base.src.scale[1] = 1.f/src->drawable.height;
	op->base.src.filter = SAMPLER_FILTER_NEAREST;
	op->base.src.repeat = SAMPLER_EXTEND_NONE;

	op->base.is_affine = true;
	op->base.floats_per_vertex = 3;
	op->base.floats_per_rect = 9;
	op->base.u.gen5.wm_kernel = WM_KERNEL;
	op->base.u.gen5.ve_id = 2;

	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
		kgem_submit(&sna->kgem);
		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
			goto fallback;
	}

	/* If the source has pending writes, prefer the BLT after all —
	 * NOTE(review): presumably to avoid flushing/stalling the render
	 * pipeline on the dirty bo; confirm against kgem semantics.
	 */
	if (kgem_bo_is_dirty(src_bo)) {
		if (sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
		    sna_blt_copy(sna, alu,
				 src_bo, dst_bo,
				 dst->drawable.bitsPerPixel,
				 op))
			return true;
	}

	gen5_align_vertex(sna, &op->base);
	gen5_copy_bind_surfaces(sna, &op->base);

	op->blt  = gen5_render_copy_blt;
	op->done = gen5_render_copy_done;
	return true;
}
2618
static void
gen5_fill_bind_surfaces(struct sna *sna,
			const struct sna_composite_op *op)
{
	/* Build the binding table for a solid fill: dst as render target
	 * plus the 1x1 B8G8R8A8 solid-colour bo as the texture.  As in
	 * gen5_copy_bind_surfaces(), an identical consecutive table is
	 * deduplicated by rewinding the surface allocation.
	 */
	bool dirty = kgem_bo_is_dirty(op->dst.bo);
	uint32_t *binding_table;
	uint16_t offset;

	gen5_get_batch(sna, op);

	binding_table = gen5_composite_get_binding_table(sna, &offset);

	binding_table[0] =
		gen5_bind_bo(sna,
			     op->dst.bo, op->dst.width, op->dst.height,
			     gen5_get_dest_format(op->dst.format),
			     true);
	binding_table[1] =
		gen5_bind_bo(sna,
			     op->src.bo, 1, 1,
			     GEN5_SURFACEFORMAT_B8G8R8A8_UNORM,
			     false);

	if (sna->kgem.surface == offset &&
	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen5.surface_table) == *(uint64_t*)binding_table) {
		sna->kgem.surface +=
			sizeof(struct gen5_surface_state_padded)/sizeof(uint32_t);
		offset = sna->render_state.gen5.surface_table;
	}

	/* The low bit of the offset carries the dst-dirty flag. */
	gen5_emit_state(sna, op, offset | dirty);
}
2651
/* Whether fills should first be attempted on the BLT engine.  With
 * PREFER_BLT_FILL (set at the top of this file) always try the BLT;
 * otherwise only when the render ring is not already active.
 */
static inline bool prefer_blt_fill(struct sna *sna)
{
#if PREFER_BLT_FILL
	return true;
#else
	return sna->kgem.mode != KGEM_RENDER;
#endif
}
2660
static bool
gen5_render_fill_boxes(struct sna *sna,
		       CARD8 op,
		       PictFormat format,
		       const xRenderColor *color,
		       const DrawableRec *dst, struct kgem_bo *dst_bo,
		       const BoxRec *box, int n)
{
	/* Fill the given boxes with a constant colour using the render
	 * pipeline, preferring the BLT for simple (PictOpSrc or weaker)
	 * operations.  Returns false when no path can handle it.
	 */
	struct sna_composite_op tmp;
	uint32_t pixel;

	DBG(("%s op=%x, color=(%04x,%04x,%04x,%04x), boxes=%d x [((%d, %d), (%d, %d))...]\n",
	     __FUNCTION__, op,
	     color->red, color->green, color->blue, color->alpha,
	     n, box->x1, box->y1, box->x2, box->y2));

	if (op >= ARRAY_SIZE(gen5_blend_op)) {
		DBG(("%s: fallback due to unhandled blend op: %d\n",
		     __FUNCTION__, op));
		return false;
	}

	/* Simple ops can be expressed as a BLT fill provided the colour
	 * converts exactly to the destination format.
	 */
	if (op <= PictOpSrc &&
	    (prefer_blt_fill(sna) ||
	     too_large(dst->width, dst->height) ||
	     !gen5_check_dst_format(format))) {
		uint8_t alu = GXinvalid;

		pixel = 0;
		if (op == PictOpClear)
			alu = GXclear;
		else if (sna_get_pixel_from_rgba(&pixel,
						 color->red,
						 color->green,
						 color->blue,
						 color->alpha,
						 format))
			alu = GXcopy;

		if (alu != GXinvalid &&
		    sna_blt_fill_boxes(sna, alu,
				       dst_bo, dst->bitsPerPixel,
				       pixel, box, n))
			return true;

		if (!gen5_check_dst_format(format))
			return false;

		if (too_large(dst->width, dst->height))
			return sna_tiling_fill_boxes(sna, op, format, color,
						     dst, dst_bo, box, n);
	}

	/* The solid cache is keyed on a8r8g8b8 pixels. */
	if (op == PictOpClear) {
		pixel = 0;
		op = PictOpSrc;
	} else if (!sna_get_pixel_from_rgba(&pixel,
					    color->red,
					    color->green,
					    color->blue,
					    color->alpha,
					    PICT_a8r8g8b8))
		return false;

	DBG(("%s(%08x x %d)\n", __FUNCTION__, pixel, n));

	memset(&tmp, 0, sizeof(tmp));

	tmp.op = op;

	tmp.dst.pixmap = (PixmapPtr)dst;
	tmp.dst.width  = dst->width;
	tmp.dst.height = dst->height;
	tmp.dst.format = format;
	tmp.dst.bo = dst_bo;

	tmp.src.bo = sna_render_get_solid(sna, pixel);
	tmp.src.filter = SAMPLER_FILTER_NEAREST;
	tmp.src.repeat = SAMPLER_EXTEND_REPEAT;

	/* One packed x/y plus a single constant texcoord per vertex. */
	tmp.is_affine = true;
	tmp.floats_per_vertex = 2;
	tmp.floats_per_rect = 6;
	tmp.u.gen5.wm_kernel = WM_KERNEL;
	tmp.u.gen5.ve_id = 1;

	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
		kgem_submit(&sna->kgem);
		if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
			kgem_bo_destroy(&sna->kgem, tmp.src.bo);
			return false;
		}
	}

	gen5_align_vertex(sna, &tmp);
	gen5_fill_bind_surfaces(sna, &tmp);

	do {
		int n_this_time;

		n_this_time = gen5_get_rectangles(sna, &tmp, n,
						  gen5_fill_bind_surfaces);
		n -= n_this_time;

		do {
			DBG(("	(%d, %d), (%d, %d)\n",
			     box->x1, box->y1, box->x2, box->y2));
			OUT_VERTEX(box->x2, box->y2);
			OUT_VERTEX_F(.5);

			OUT_VERTEX(box->x1, box->y2);
			OUT_VERTEX_F(.5);

			OUT_VERTEX(box->x1, box->y1);
			OUT_VERTEX_F(.5);

			box++;
		} while (--n_this_time);
	} while (n);

	gen4_vertex_flush(sna);
	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
	return true;
}
2785
static void
gen5_render_fill_op_blt(struct sna *sna,
			const struct sna_fill_op *op,
			int16_t x, int16_t y, int16_t w, int16_t h)
{
	/* Emit one filled rectangle given origin and size; the .5
	 * texcoord samples the centre of the 1x1 solid-colour texture.
	 */
	DBG(("%s (%d, %d)x(%d, %d)\n", __FUNCTION__, x,y,w,h));

	gen5_get_rectangles(sna, &op->base, 1, gen5_fill_bind_surfaces);

	OUT_VERTEX(x+w, y+h);
	OUT_VERTEX_F(.5);

	OUT_VERTEX(x, y+h);
	OUT_VERTEX_F(.5);

	OUT_VERTEX(x, y);
	OUT_VERTEX_F(.5);
}
2804
fastcall static void
gen5_render_fill_op_box(struct sna *sna,
			const struct sna_fill_op *op,
			const BoxRec *box)
{
	/* Emit one filled rectangle given as a box; the .5 texcoord
	 * samples the centre of the 1x1 solid-colour texture.
	 */
	DBG(("%s: (%d, %d),(%d, %d)\n", __FUNCTION__,
	     box->x1, box->y1, box->x2, box->y2));

	gen5_get_rectangles(sna, &op->base, 1, gen5_fill_bind_surfaces);

	OUT_VERTEX(box->x2, box->y2);
	OUT_VERTEX_F(.5);

	OUT_VERTEX(box->x1, box->y2);
	OUT_VERTEX_F(.5);

	OUT_VERTEX(box->x1, box->y1);
	OUT_VERTEX_F(.5);
}
2824
fastcall static void
gen5_render_fill_op_boxes(struct sna *sna,
			  const struct sna_fill_op *op,
			  const BoxRec *box,
			  int nbox)
{
	/* Emit a run of filled rectangles, batching by however many
	 * rectangles gen5_get_rectangles() can grant at a time.
	 * NOTE(review): assumes nbox >= 1 on entry (do/while).
	 */
	DBG(("%s: (%d, %d),(%d, %d)... x %d\n", __FUNCTION__,
	     box->x1, box->y1, box->x2, box->y2, nbox));

	do {
		int nbox_this_time;

		nbox_this_time = gen5_get_rectangles(sna, &op->base, nbox,
						     gen5_fill_bind_surfaces);
		nbox -= nbox_this_time;

		do {
			OUT_VERTEX(box->x2, box->y2);
			OUT_VERTEX_F(.5);

			OUT_VERTEX(box->x1, box->y2);
			OUT_VERTEX_F(.5);

			OUT_VERTEX(box->x1, box->y1);
			OUT_VERTEX_F(.5);
			box++;
		} while (--nbox_this_time);
	} while (nbox);
}
2854
static void
gen5_render_fill_op_done(struct sna *sna,
			 const struct sna_fill_op *op)
{
	/* Finish a fill: flush pending vertices and release the solid
	 * colour bo obtained from sna_render_get_solid().
	 */
	if (sna->render.vertex_offset)
		gen4_vertex_flush(sna);
	kgem_bo_destroy(&sna->kgem, op->base.src.bo);

	DBG(("%s()\n", __FUNCTION__));
}
2865
static bool
gen5_render_fill(struct sna *sna, uint8_t alu,
		 PixmapPtr dst, struct kgem_bo *dst_bo,
		 uint32_t color, unsigned flags,
		 struct sna_fill_op *op)
{
	/* Prepare a reusable solid-fill operation (op->blt/box/boxes then
	 * op->done).  The BLT engine is tried first; the render path
	 * handles only GXcopy/GXclear within the pipeline size limits.
	 */
	DBG(("%s(alu=%d, color=%08x)\n", __FUNCTION__, alu, color));

	if (prefer_blt_fill(sna) &&
	    sna_blt_fill(sna, alu,
			 dst_bo, dst->drawable.bitsPerPixel,
			 color,
			 op))
		return true;

	if (!(alu == GXcopy || alu == GXclear) ||
	    too_large(dst->drawable.width, dst->drawable.height))
		return sna_blt_fill(sna, alu,
				    dst_bo, dst->drawable.bitsPerPixel,
				    color,
				    op);

	if (alu == GXclear)
		color = 0;

	op->base.op = color == 0 ? PictOpClear : PictOpSrc;

	op->base.dst.pixmap = dst;
	op->base.dst.width  = dst->drawable.width;
	op->base.dst.height = dst->drawable.height;
	op->base.dst.format = sna_format_for_depth(dst->drawable.depth);
	op->base.dst.bo = dst_bo;
	op->base.dst.x = op->base.dst.y = 0;

	op->base.need_magic_ca_pass = 0;
	op->base.has_component_alpha = 0;

	/* Source is a cached 1x1 bo holding the colour, sampled with
	 * repeat so a single texel covers any rectangle.
	 */
	op->base.src.bo =
		sna_render_get_solid(sna,
				     sna_rgba_for_color(color,
							dst->drawable.depth));
	op->base.src.filter = SAMPLER_FILTER_NEAREST;
	op->base.src.repeat = SAMPLER_EXTEND_REPEAT;

	op->base.mask.bo = NULL;
	op->base.mask.filter = SAMPLER_FILTER_NEAREST;
	op->base.mask.repeat = SAMPLER_EXTEND_NONE;

	op->base.is_affine = true;
	op->base.floats_per_vertex = 2;
	op->base.floats_per_rect = 6;
	op->base.u.gen5.wm_kernel = WM_KERNEL;
	op->base.u.gen5.ve_id = 1;

	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
		kgem_submit(&sna->kgem);
		if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
			kgem_bo_destroy(&sna->kgem, op->base.src.bo);
			return false;
		}
	}

	gen5_align_vertex(sna, &op->base);
	gen5_fill_bind_surfaces(sna, &op->base);

	op->blt   = gen5_render_fill_op_blt;
	op->box   = gen5_render_fill_op_box;
	op->boxes = gen5_render_fill_op_boxes;
	op->points = NULL;
	op->done  = gen5_render_fill_op_done;
	return true;
}
2938
2939static bool
2940gen5_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
2941			     uint32_t color,
2942			     int16_t x1, int16_t y1, int16_t x2, int16_t y2,
2943			     uint8_t alu)
2944{
2945	BoxRec box;
2946
2947	box.x1 = x1;
2948	box.y1 = y1;
2949	box.x2 = x2;
2950	box.y2 = y2;
2951
2952	return sna_blt_fill_boxes(sna, alu,
2953				  bo, dst->drawable.bitsPerPixel,
2954				  color, &box, 1);
2955}
2956
static bool
gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
		     uint32_t color,
		     int16_t x1, int16_t y1,
		     int16_t x2, int16_t y2,
		     uint8_t alu)
{
	/* Fill a single rectangle immediately (no reusable op).  Tries
	 * the BLT first, then falls back to a one-shot render fill.
	 */
	struct sna_composite_op tmp;

#if NO_FILL_ONE
	return gen5_render_fill_one_try_blt(sna, dst, bo, color,
					    x1, y1, x2, y2, alu);
#endif

	/* Prefer to use the BLT if already engaged */
	if (prefer_blt_fill(sna) &&
	    gen5_render_fill_one_try_blt(sna, dst, bo, color,
					 x1, y1, x2, y2, alu))
		return true;

	/* Must use the BLT if we can't RENDER... */
	if (!(alu == GXcopy || alu == GXclear) ||
	    too_large(dst->drawable.width, dst->drawable.height))
		return gen5_render_fill_one_try_blt(sna, dst, bo, color,
						    x1, y1, x2, y2, alu);

	if (alu == GXclear)
		color = 0;

	tmp.op = color == 0 ? PictOpClear : PictOpSrc;

	tmp.dst.pixmap = dst;
	tmp.dst.width  = dst->drawable.width;
	tmp.dst.height = dst->drawable.height;
	tmp.dst.format = sna_format_for_depth(dst->drawable.depth);
	tmp.dst.bo = bo;
	tmp.dst.x = tmp.dst.y = 0;

	/* Solid colour as a cached 1x1 repeated texture. */
	tmp.src.bo =
		sna_render_get_solid(sna,
				     sna_rgba_for_color(color,
							dst->drawable.depth));
	tmp.src.filter = SAMPLER_FILTER_NEAREST;
	tmp.src.repeat = SAMPLER_EXTEND_REPEAT;

	tmp.mask.bo = NULL;
	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
	tmp.mask.repeat = SAMPLER_EXTEND_NONE;

	tmp.is_affine = true;
	tmp.floats_per_vertex = 2;
	tmp.floats_per_rect = 6;
	tmp.has_component_alpha = 0;
	tmp.need_magic_ca_pass = false;

	tmp.u.gen5.wm_kernel = WM_KERNEL;
	tmp.u.gen5.ve_id = 1;

	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
		kgem_submit(&sna->kgem);
		if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
			kgem_bo_destroy(&sna->kgem, tmp.src.bo);
			return false;
		}
	}

	gen5_align_vertex(sna, &tmp);
	gen5_fill_bind_surfaces(sna, &tmp);

	gen5_get_rectangles(sna, &tmp, 1, gen5_fill_bind_surfaces);

	/* Three corners; the .5 texcoord samples the solid texel. */
	DBG(("	(%d, %d), (%d, %d)\n", x1, y1, x2, y2));
	OUT_VERTEX(x2, y2);
	OUT_VERTEX_F(.5);

	OUT_VERTEX(x1, y2);
	OUT_VERTEX_F(.5);

	OUT_VERTEX(x1, y1);
	OUT_VERTEX_F(.5);

	gen4_vertex_flush(sna);
	kgem_bo_destroy(&sna->kgem, tmp.src.bo);

	return true;
}
/* Called when the batch switches rings/modes; arranges the workaround
 * state for BLT→3D transitions and opportunistically submits the batch
 * while the target ring is idle.
 */
static void
gen5_render_context_switch(struct kgem *kgem,
			   int new_mode)
{
	if (!kgem->nbatch)
		return;

	/* WaNonPipelinedStateCommandFlush
	 *
	 * Ironlake has a limitation that a 3D or Media command can't
	 * be the first command after a BLT, unless it's
	 * non-pipelined.
	 *
	 * We do this by ensuring that the non-pipelined drawrect
	 * is always emitted first following a switch from BLT.
	 */
	if (kgem->mode == KGEM_BLT) {
		struct sna *sna = to_sna_from_kgem(kgem);
		DBG(("%s: forcing drawrect on next state emission\n",
		     __FUNCTION__));
		/* Invalidate the cached drawrect so it is re-emitted. */
		sna->render_state.gen5.drawrect_limit = -1;
	}

	if (kgem_ring_is_idle(kgem, kgem->ring)) {
		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
		_kgem_submit(kgem);
	}
}
3071
/* Reset all cached gen5 render state after a batch submission so that
 * the invariant state and every pointer/table is re-emitted into the
 * new batch.
 */
static void gen5_render_reset(struct sna *sna)
{
	sna->render_state.gen5.needs_invariant = true;
	sna->render_state.gen5.ve_id = -1;
	sna->render_state.gen5.last_primitive = -1;
	sna->render_state.gen5.last_pipelined_pointers = 0;

	sna->render_state.gen5.drawrect_offset = -1;
	sna->render_state.gen5.drawrect_limit = -1;
	sna->render_state.gen5.surface_table = -1;

	/* A vbo we can no longer map is useless for further vertex
	 * emission; drop it now rather than stalling later.
	 */
	if (sna->render.vbo && !kgem_bo_can_map(&sna->kgem, sna->render.vbo)) {
		DBG(("%s: discarding unmappable vbo\n", __FUNCTION__));
		discard_vbo(sna);
	}

	sna->render.vertex_offset = 0;
	sna->render.nvertex_reloc = 0;
	sna->render.vb_id = 0;
}
3092
3093static void gen5_render_fini(struct sna *sna)
3094{
3095	kgem_bo_destroy(&sna->kgem, sna->render_state.gen5.general_bo);
3096}
3097
3098static uint32_t gen5_create_vs_unit_state(struct sna_static_stream *stream)
3099{
3100	struct gen5_vs_unit_state *vs = sna_static_stream_map(stream, sizeof(*vs), 32);
3101
3102	/* Set up the vertex shader to be disabled (passthrough) */
3103	vs->thread4.nr_urb_entries = URB_VS_ENTRIES >> 2;
3104	vs->thread4.urb_entry_allocation_size = URB_VS_ENTRY_SIZE - 1;
3105	vs->vs6.vs_enable = 0;
3106	vs->vs6.vert_cache_disable = 1;
3107
3108	return sna_static_stream_offsetof(stream, vs);
3109}
3110
3111static uint32_t gen5_create_sf_state(struct sna_static_stream *stream,
3112				     uint32_t kernel)
3113{
3114	struct gen5_sf_unit_state *sf_state;
3115
3116	sf_state = sna_static_stream_map(stream, sizeof(*sf_state), 32);
3117
3118	sf_state->thread0.grf_reg_count = GEN5_GRF_BLOCKS(SF_KERNEL_NUM_GRF);
3119	sf_state->thread0.kernel_start_pointer = kernel >> 6;
3120
3121	sf_state->thread3.const_urb_entry_read_length = 0;	/* no const URBs */
3122	sf_state->thread3.const_urb_entry_read_offset = 0;	/* no const URBs */
3123	sf_state->thread3.urb_entry_read_length = 1;	/* 1 URB per vertex */
3124	/* don't smash vertex header, read start from dw8 */
3125	sf_state->thread3.urb_entry_read_offset = 1;
3126	sf_state->thread3.dispatch_grf_start_reg = 3;
3127	sf_state->thread4.max_threads = SF_MAX_THREADS - 1;
3128	sf_state->thread4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1;
3129	sf_state->thread4.nr_urb_entries = URB_SF_ENTRIES;
3130	sf_state->sf5.viewport_transform = false;	/* skip viewport */
3131	sf_state->sf6.cull_mode = GEN5_CULLMODE_NONE;
3132	sf_state->sf6.scissor = 0;
3133	sf_state->sf7.trifan_pv = 2;
3134	sf_state->sf6.dest_org_vbias = 0x8;
3135	sf_state->sf6.dest_org_hbias = 0x8;
3136
3137	return sna_static_stream_offsetof(stream, sf_state);
3138}
3139
3140static uint32_t gen5_create_sampler_state(struct sna_static_stream *stream,
3141					  sampler_filter_t src_filter,
3142					  sampler_extend_t src_extend,
3143					  sampler_filter_t mask_filter,
3144					  sampler_extend_t mask_extend)
3145{
3146	struct gen5_sampler_state *sampler_state;
3147
3148	sampler_state = sna_static_stream_map(stream,
3149					      sizeof(struct gen5_sampler_state) * 2,
3150					      32);
3151	sampler_state_init(&sampler_state[0], src_filter, src_extend);
3152	sampler_state_init(&sampler_state[1], mask_filter, mask_extend);
3153
3154	return sna_static_stream_offsetof(stream, sampler_state);
3155}
3156
3157static void gen5_init_wm_state(struct gen5_wm_unit_state *state,
3158			       bool has_mask,
3159			       uint32_t kernel,
3160			       uint32_t sampler)
3161{
3162	state->thread0.grf_reg_count = GEN5_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
3163	state->thread0.kernel_start_pointer = kernel >> 6;
3164
3165	state->thread1.single_program_flow = 0;
3166
3167	/* scratch space is not used in our kernel */
3168	state->thread2.scratch_space_base_pointer = 0;
3169	state->thread2.per_thread_scratch_space = 0;
3170
3171	state->thread3.const_urb_entry_read_length = 0;
3172	state->thread3.const_urb_entry_read_offset = 0;
3173
3174	state->thread3.urb_entry_read_offset = 0;
3175	/* wm kernel use urb from 3, see wm_program in compiler module */
3176	state->thread3.dispatch_grf_start_reg = 3;	/* must match kernel */
3177
3178	state->wm4.sampler_count = 0;	/* hardware requirement */
3179
3180	state->wm4.sampler_state_pointer = sampler >> 5;
3181	state->wm5.max_threads = PS_MAX_THREADS - 1;
3182	state->wm5.transposed_urb_read = 0;
3183	state->wm5.thread_dispatch_enable = 1;
3184	/* just use 16-pixel dispatch (4 subspans), don't need to change kernel
3185	 * start point
3186	 */
3187	state->wm5.enable_16_pix = 1;
3188	state->wm5.enable_8_pix = 0;
3189	state->wm5.early_depth_test = 1;
3190
3191	/* Each pair of attributes (src/mask coords) is two URB entries */
3192	if (has_mask) {
3193		state->thread1.binding_table_entry_count = 3;	/* 2 tex and fb */
3194		state->thread3.urb_entry_read_length = 4;
3195	} else {
3196		state->thread1.binding_table_entry_count = 2;	/* 1 tex and fb */
3197		state->thread3.urb_entry_read_length = 2;
3198	}
3199
3200	/* binding table entry count is only used for prefetching,
3201	 * and it has to be set 0 for Ironlake
3202	 */
3203	state->thread1.binding_table_entry_count = 0;
3204}
3205
3206static uint32_t gen5_create_cc_unit_state(struct sna_static_stream *stream)
3207{
3208	uint8_t *ptr, *base;
3209	int i, j;
3210
3211	base = ptr =
3212		sna_static_stream_map(stream,
3213				      GEN5_BLENDFACTOR_COUNT*GEN5_BLENDFACTOR_COUNT*64,
3214				      64);
3215
3216	for (i = 0; i < GEN5_BLENDFACTOR_COUNT; i++) {
3217		for (j = 0; j < GEN5_BLENDFACTOR_COUNT; j++) {
3218			struct gen5_cc_unit_state *state =
3219				(struct gen5_cc_unit_state *)ptr;
3220
3221			state->cc3.blend_enable =
3222				!(j == GEN5_BLENDFACTOR_ZERO && i == GEN5_BLENDFACTOR_ONE);
3223
3224			state->cc5.logicop_func = 0xc;	/* COPY */
3225			state->cc5.ia_blend_function = GEN5_BLENDFUNCTION_ADD;
3226
3227			/* Fill in alpha blend factors same as color, for the future. */
3228			state->cc5.ia_src_blend_factor = i;
3229			state->cc5.ia_dest_blend_factor = j;
3230
3231			state->cc6.blend_function = GEN5_BLENDFUNCTION_ADD;
3232			state->cc6.clamp_post_alpha_blend = 1;
3233			state->cc6.clamp_pre_alpha_blend = 1;
3234			state->cc6.src_blend_factor = i;
3235			state->cc6.dest_blend_factor = j;
3236
3237			ptr += 64;
3238		}
3239	}
3240
3241	return sna_static_stream_offsetof(stream, base);
3242}
3243
/* Build all of the immutable hardware unit state (VS/SF/WM/CC) into a
 * single static stream and pin it in state->general_bo.  The order of
 * allocations below determines every recorded offset, so it must not
 * be rearranged.  Returns false if the backing bo could not be created.
 */
static bool gen5_render_setup(struct sna *sna)
{
	struct gen5_render_state *state = &sna->render_state.gen5;
	struct sna_static_stream general;
	struct gen5_wm_unit_state_padded *wm_state;
	uint32_t sf[2], wm[KERNEL_COUNT];
	int i, j, k, l, m;

	sna_static_stream_init(&general);

	/* Zero pad the start. If you see an offset of 0x0 in the batchbuffer
	 * dumps, you know it points to zero.
	 */
	null_create(&general);

	/* Set up the two SF states (one for blending with a mask, one without) */
	sf[0] = sna_static_stream_compile_sf(sna, &general, brw_sf_kernel__nomask);
	sf[1] = sna_static_stream_compile_sf(sna, &general, brw_sf_kernel__mask);

	/* Upload each pixel-shader kernel: prebuilt binaries are copied
	 * verbatim, the rest are compiled on the fly (16-wide dispatch). */
	for (m = 0; m < KERNEL_COUNT; m++) {
		if (wm_kernels[m].size) {
			wm[m] = sna_static_stream_add(&general,
						      wm_kernels[m].data,
						      wm_kernels[m].size,
						      64);
		} else {
			wm[m] = sna_static_stream_compile_wm(sna, &general,
							     wm_kernels[m].data,
							     16);
		}
		assert(wm[m]);
	}

	state->vs = gen5_create_vs_unit_state(&general);

	state->sf[0] = gen5_create_sf_state(&general, sf[0]);
	state->sf[1] = gen5_create_sf_state(&general, sf[1]);


	/* Set up the WM states: each filter/extend type for source and mask, per
	 * kernel.
	 */
	wm_state = sna_static_stream_map(&general,
					  sizeof(*wm_state) * KERNEL_COUNT *
					  FILTER_COUNT * EXTEND_COUNT *
					  FILTER_COUNT * EXTEND_COUNT,
					  64);
	state->wm = sna_static_stream_offsetof(&general, wm_state);
	/* Loop order (src filter, src extend, mask filter, mask extend,
	 * kernel) must match the runtime lookup that indexes state->wm. */
	for (i = 0; i < FILTER_COUNT; i++) {
		for (j = 0; j < EXTEND_COUNT; j++) {
			for (k = 0; k < FILTER_COUNT; k++) {
				for (l = 0; l < EXTEND_COUNT; l++) {
					uint32_t sampler_state;

					sampler_state =
						gen5_create_sampler_state(&general,
									  i, j,
									  k, l);

					for (m = 0; m < KERNEL_COUNT; m++) {
						gen5_init_wm_state(&wm_state->state,
								   wm_kernels[m].has_mask,
								   wm[m], sampler_state);
						wm_state++;
					}
				}
			}
		}
	}

	state->cc = gen5_create_cc_unit_state(&general);

	/* Freeze the stream into a bo; NULL here means allocation failed. */
	state->general_bo = sna_static_stream_fini(sna, &general);
	return state->general_bo != NULL;
}
3319
3320const char *gen5_render_init(struct sna *sna, const char *backend)
3321{
3322	if (!gen5_render_setup(sna))
3323		return backend;
3324
3325	sna->kgem.context_switch = gen5_render_context_switch;
3326	sna->kgem.retire = gen4_render_retire;
3327	sna->kgem.expire = gen4_render_expire;
3328
3329#if !NO_COMPOSITE
3330	sna->render.composite = gen5_render_composite;
3331	sna->render.prefer_gpu |= PREFER_GPU_RENDER;
3332#endif
3333#if !NO_COMPOSITE_SPANS
3334	sna->render.check_composite_spans = gen5_check_composite_spans;
3335	sna->render.composite_spans = gen5_render_composite_spans;
3336	if (intel_get_device_id(sna->scrn) == 0x0044)
3337		sna->render.prefer_gpu |= PREFER_GPU_SPANS;
3338#endif
3339	sna->render.video = gen5_render_video;
3340
3341	sna->render.copy_boxes = gen5_render_copy_boxes;
3342	sna->render.copy = gen5_render_copy;
3343
3344	sna->render.fill_boxes = gen5_render_fill_boxes;
3345	sna->render.fill = gen5_render_fill;
3346	sna->render.fill_one = gen5_render_fill_one;
3347
3348	sna->render.flush = gen4_render_flush;
3349	sna->render.reset = gen5_render_reset;
3350	sna->render.fini = gen5_render_fini;
3351
3352	sna->render.max_3d_size = MAX_3D_SIZE;
3353	sna->render.max_3d_pitch = 1 << 18;
3354	return "Ironlake (gen5)";
3355}
3356