#ifndef SNA_RENDER_INLINE_H
#define SNA_RENDER_INLINE_H

static inline bool need_tiling(struct sna *sna, int16_t width, int16_t height)
{
	/* Is the damage area too large to fit in the 3D pipeline,
	 * requiring us to split the operation up into tiles?
	 */
	return (width > sna->render.max_3d_size ||
		height > sna->render.max_3d_size);
}

static inline bool need_redirect(struct sna *sna, PixmapPtr dst)
{
	/* Is the pixmap too large to render to? */
	return (dst->drawable.width > sna->render.max_3d_size ||
		dst->drawable.height > sna->render.max_3d_size);
}

static force_inline float pack_2s(int16_t x, int16_t y)
{
	union {
		struct sna_coordinate p;
		float f;
	} u;
	u.p.x = x;
	u.p.y = y;
	return u.f;
}

static force_inline int vertex_space(struct sna *sna)
{
	return sna->render.vertex_size - sna->render.vertex_used;
}
static force_inline void vertex_emit(struct sna *sna, float v)
{
	assert(sna->render.vertex_used < sna->render.vertex_size);
	sna->render.vertices[sna->render.vertex_used++] = v;
}
static force_inline void vertex_emit_2s(struct sna *sna, int16_t x, int16_t y)
{
	vertex_emit(sna, pack_2s(x, y));
}
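
/*
 * Illustrative sketch (not part of the driver): a caller is expected to
 * reserve vertex space before emitting, e.g. packing one screen-space
 * triangle as three x/y pairs:
 *
 *	if (vertex_space(sna) < 3)
 *		... flush or grow the vertex buffer first ...
 *	vertex_emit_2s(sna, x0, y0);
 *	vertex_emit_2s(sna, x1, y1);
 *	vertex_emit_2s(sna, x2, y2);
 *
 * Each call packs both 16-bit coordinates into a single float slot via
 * pack_2s(), so a coordinate-only triangle consumes three floats.
 */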

static force_inline int batch_space(struct sna *sna)
{
	assert(sna->kgem.nbatch <= KGEM_BATCH_SIZE(&sna->kgem));
	assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED <= sna->kgem.surface);
	return sna->kgem.surface - sna->kgem.nbatch - KGEM_BATCH_RESERVED;
}

static force_inline void batch_emit(struct sna *sna, uint32_t dword)
{
	assert(sna->kgem.mode != KGEM_NONE);
	assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED < sna->kgem.surface);
	sna->kgem.batch[sna->kgem.nbatch++] = dword;
}

static force_inline void batch_emit64(struct sna *sna, uint64_t qword)
{
	assert(sna->kgem.mode != KGEM_NONE);
	assert(sna->kgem.nbatch + 2 + KGEM_BATCH_RESERVED < sna->kgem.surface);
	*(uint64_t *)(sna->kgem.batch+sna->kgem.nbatch) = qword;
	sna->kgem.nbatch += 2;
}

static force_inline void batch_emit_float(struct sna *sna, float f)
{
	union {
		uint32_t dw;
		float f;
	} u;
	u.f = f;
	batch_emit(sna, u.dw);
}
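
/*
 * Illustrative sketch (not part of the driver): batch_space() reports how
 * many dwords may still be emitted, so a typical sequence checks first:
 *
 *	if (batch_space(sna) < len)
 *		... submit the batch and start a new one ...
 *	batch_emit(sna, cmd);      (command dword)
 *	batch_emit(sna, operand);  (its payload)
 *
 * batch_emit64() is the same idea for a quadword operand, advancing the
 * write pointer by two dwords at once.
 */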

static inline bool
is_gpu(struct sna *sna, DrawablePtr drawable, unsigned prefer)
{
	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);

	if (priv == NULL || priv->clear || priv->cpu)
		return false;

	if (priv->cpu_damage == NULL)
		return true;

	if (priv->gpu_damage && !priv->gpu_bo->proxy &&
	    (sna->render.prefer_gpu & prefer))
		return true;

	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
		return true;

	if (DAMAGE_IS_ALL(priv->cpu_damage))
		return false;

	return priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo);
}

static inline bool
too_small(struct sna_pixmap *priv)
{
	assert(priv);

	if (priv->gpu_bo)
		return false;

	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
		return false;

	return (priv->create & KGEM_CAN_CREATE_GPU) == 0;
}

static inline bool
can_render_to_picture(PicturePtr dst)
{
	if (dst->alphaMap) {
		DBG(("%s(pixmap=%ld) -- no, has alphamap\n", __FUNCTION__,
		     get_drawable_pixmap(dst->pDrawable)->drawable.serialNumber));
		return false;
	}

	switch (PICT_FORMAT_TYPE(dst->format)) {
	case PICT_TYPE_COLOR:
	case PICT_TYPE_GRAY:
	case PICT_TYPE_OTHER:
		DBG(("%s(pixmap=%ld) -- no, has palette\n", __FUNCTION__,
		     get_drawable_pixmap(dst->pDrawable)->drawable.serialNumber));
		return false;
	default:
		break;
	}

	return true;
}

static inline bool
is_gpu_dst(struct sna_pixmap *priv)
{
	assert(priv);

	if (too_small(priv))
		return false;

	if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
		return true;

	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
		return true;

	if (DAMAGE_IS_ALL(priv->cpu_damage))
		return false;

	return priv->gpu_damage != NULL || !priv->cpu;
}

static inline bool
unattached(DrawablePtr drawable)
{
	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
	return priv == NULL || (priv->gpu_damage == NULL && priv->cpu_damage && !priv->cpu_bo);
}

static inline bool
picture_is_gpu(struct sna *sna, PicturePtr picture, unsigned flags)
{
	if (!picture)
		return false;

	if (!picture->pDrawable) {
		switch (flags) {
		case PREFER_GPU_RENDER:
			switch (picture->pSourcePict->type) {
			case SourcePictTypeSolidFill:
			case SourcePictTypeLinear:
				return false;
			default:
				return true;
			}
		case PREFER_GPU_SPANS:
			return true;
		default:
			return false;
		}
	} else {
		if (picture->repeat &&
		    (picture->pDrawable->width | picture->pDrawable->height) == 1)
			return flags == PREFER_GPU_SPANS;
	}

	return is_gpu(sna, picture->pDrawable, flags);
}

static inline bool
picture_is_cpu(struct sna *sna, PicturePtr picture)
{
	if (!picture->pDrawable)
		return false;

	return !is_gpu(sna, picture->pDrawable, PREFER_GPU_RENDER);
}

static inline bool sna_blt_compare_depth(const DrawableRec *src, const DrawableRec *dst)
{
	if (src->depth == dst->depth)
		return true;

	/* Also allow for the alpha to be discarded on a copy */
	if (src->bitsPerPixel != dst->bitsPerPixel)
		return false;

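	/* e.g. a8r8g8b8 (depth 32) copied onto x8r8g8b8 (depth 24):
	 * both are 32bpp and the destination simply treats the alpha
	 * byte as padding.
	 */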
	if (dst->depth == 24 && src->depth == 32)
		return true;

	/* Note that a depth-16 pixmap is r5g6b5, not x1r5g5b5. */

	return false;
}

static inline struct kgem_bo *
sna_render_get_alpha_gradient(struct sna *sna)
{
	return kgem_bo_reference(sna->render.alpha_cache.cache_bo);
}

static inline void
sna_render_picture_extents(PicturePtr p, BoxRec *box)
{
	box->x1 = p->pDrawable->x;
	box->y1 = p->pDrawable->y;
	box->x2 = bound(box->x1, p->pDrawable->width);
	box->y2 = bound(box->y1, p->pDrawable->height);

	if (box->x1 < p->pCompositeClip->extents.x1)
		box->x1 = p->pCompositeClip->extents.x1;
	if (box->y1 < p->pCompositeClip->extents.y1)
		box->y1 = p->pCompositeClip->extents.y1;

	if (box->x2 > p->pCompositeClip->extents.x2)
		box->x2 = p->pCompositeClip->extents.x2;
	if (box->y2 > p->pCompositeClip->extents.y2)
		box->y2 = p->pCompositeClip->extents.y2;

	assert(box->x2 > box->x1 && box->y2 > box->y1);
}

static inline void
sna_render_reduce_damage(struct sna_composite_op *op,
			 int dst_x, int dst_y,
			 int width, int height)
{
	BoxRec r;

	if (op->damage == NULL || *op->damage == NULL)
		return;

	if (DAMAGE_IS_ALL(*op->damage)) {
		DBG(("%s: damage-all, discarding damage\n",
		     __FUNCTION__));
		op->damage = NULL;
		return;
	}

	if (width == 0 || height == 0)
		return;

	r.x1 = dst_x + op->dst.x;
	r.x2 = r.x1 + width;

	r.y1 = dst_y + op->dst.y;
	r.y2 = r.y1 + height;

	if (sna_damage_contains_box__no_reduce(*op->damage, &r)) {
		DBG(("%s: damage contains render extents, discarding damage\n",
		     __FUNCTION__));
		op->damage = NULL;
	}
}

inline static uint32_t
color_convert(uint32_t pixel,
	      uint32_t src_format,
	      uint32_t dst_format)
{
	DBG(("%s: src=%08x [%08x]\n", __FUNCTION__, pixel, src_format));

	if (src_format != dst_format) {
		uint16_t red, green, blue, alpha;

		if (!sna_get_rgba_from_pixel(pixel,
					     &red, &green, &blue, &alpha,
					     src_format))
			return 0;

		if (!sna_get_pixel_from_rgba(&pixel,
					     red, green, blue, alpha,
					     dst_format))
			return 0;
	}

	DBG(("%s: dst=%08x [%08x]\n", __FUNCTION__, pixel, dst_format));
	return pixel;
}
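
/*
 * Worked example (illustrative): converting opaque red between common
 * formats. Starting from pixel 0xffff0000 in PICT_a8r8g8b8, the rgba
 * round-trip above yields 0xf800 in PICT_r5g6b5; the red channel is
 * truncated from 8 bits to 5 and repositioned, and the alpha is dropped.
 */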

inline static bool dst_use_gpu(PixmapPtr pixmap)
{
	struct sna_pixmap *priv = sna_pixmap(pixmap);
	if (priv == NULL)
		return false;

	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
		return true;

	if (priv->clear)
		return false;

	if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
		return true;

	return priv->gpu_damage && (!priv->cpu || !priv->cpu_damage);
}

inline static bool dst_use_cpu(PixmapPtr pixmap)
{
	struct sna_pixmap *priv = sna_pixmap(pixmap);
	if (priv == NULL || priv->shm)
		return true;

	return priv->cpu_damage && priv->cpu;
}

inline static bool dst_is_cpu(PixmapPtr pixmap)
{
	struct sna_pixmap *priv = sna_pixmap(pixmap);
	return priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage);
}

inline static bool
untransformed(PicturePtr p)
{
	return !p->transform || pixman_transform_is_int_translate(p->transform);
}

inline static void
boxes_extents(const BoxRec *box, int n, BoxRec *extents)
{
	*extents = box[0];
	while (--n) {
		box++;

		if (box->x1 < extents->x1)
			extents->x1 = box->x1;
		if (box->x2 > extents->x2)
			extents->x2 = box->x2;

		if (box->y1 < extents->y1)
			extents->y1 = box->y1;
		if (box->y2 > extents->y2)
			extents->y2 = box->y2;
	}
}

inline static bool
overlaps(struct sna *sna,
	 struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
	 struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
	 const BoxRec *box, int n, unsigned flags,
	 BoxRec *extents)
{
	if (src_bo != dst_bo)
		return false;

	if (flags & COPY_NO_OVERLAP)
		return false;

	boxes_extents(box, n, extents);
	return (extents->x2 + src_dx > extents->x1 + dst_dx &&
		extents->x1 + src_dx < extents->x2 + dst_dx &&
		extents->y2 + src_dy > extents->y1 + dst_dy &&
		extents->y1 + src_dy < extents->y2 + dst_dy);
}
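
/*
 * Note (illustrative): the return expression is the standard interval-
 * overlap test applied to the copy extents after translating the source
 * and destination rectangles by their respective offsets into the shared
 * bo. For example, copying a 10x10 box from (0,0) to (5,5) within one bo
 * overlaps, whereas a copy to (20,20) does not.
 */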

static inline long get_picture_id(PicturePtr picture)
{
	return picture && picture->pDrawable ? get_drawable_pixmap(picture->pDrawable)->drawable.serialNumber : 0;
}

#endif /* SNA_RENDER_INLINE_H */