Lines matching refs:intel (the leading number on each line is the line number of the match in the source file)

41 static inline int intel_batch_space(intel_screen_private *intel)
43 return (intel->batch_bo->size - BATCH_RESERVED) - (4*intel->batch_used);
46 static inline int intel_vertex_space(intel_screen_private *intel)
48 return intel->vertex_bo ? intel->vertex_bo->size - (4*intel->vertex_used) : 0;
52 intel_batch_require_space(ScrnInfoPtr scrn, intel_screen_private *intel, int sz)
54 assert(sz < intel->batch_bo->size - 8);
55 if (intel_batch_space(intel) < sz)
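
The three helpers above do the space accounting in bytes: batch_used and vertex_used count 32-bit entries already written (hence the factor of 4), BATCH_RESERVED keeps room for the closing MI_BATCH_BUFFER_END, and intel_batch_require_space submits the current batch when fewer than sz bytes remain. A standalone sketch of the same arithmetic, using simplified stand-in types instead of the driver's intel_screen_private and drm_intel_bo:

/* Standalone sketch of the space accounting; the sizes and the submit stub
 * are illustrative, not the driver's real values. */
#include <assert.h>
#include <stdio.h>

#define BATCH_RESERVED 16            /* assumed reserve for the closing commands */

struct sketch_bo { long size; };
struct sketch_intel {
        struct sketch_bo *batch_bo;
        int batch_used;              /* dwords already emitted */
};

static int sketch_batch_space(struct sketch_intel *intel)
{
        /* bytes still available = size - reserve - 4 bytes per dword used */
        return (intel->batch_bo->size - BATCH_RESERVED) - (4 * intel->batch_used);
}

static void sketch_require_space(struct sketch_intel *intel, int sz)
{
        assert(sz < intel->batch_bo->size - 8);
        if (sketch_batch_space(intel) < sz) {
                /* the real helper submits the batch here and starts a new one */
                printf("would submit batch (%d bytes left, %d needed)\n",
                       sketch_batch_space(intel), sz);
                intel->batch_used = 0;
        }
}

int main(void)
{
        struct sketch_bo bo = { .size = 4096 };
        struct sketch_intel intel = { .batch_bo = &bo, .batch_used = 1000 };

        sketch_require_space(&intel, 256);   /* 4096-16-4000 = 80 < 256, so "submit" */
        return 0;
}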
61 intel_screen_private *intel = intel_get_screen_private(scrn);
63 assert(!intel->in_batch_atomic);
65 if (intel->current_batch != RENDER_BATCH) {
66 if (intel->current_batch && intel->context_switch)
67 intel->context_switch(intel, RENDER_BATCH);
70 intel_batch_require_space(scrn, intel, sz * 4);
71 intel->current_batch = RENDER_BATCH;
73 intel->in_batch_atomic = TRUE;
74 intel->batch_atomic_limit = intel->batch_used + sz;
79 intel_screen_private *intel = intel_get_screen_private(scrn);
81 assert(intel->in_batch_atomic);
82 assert(intel->batch_used <= intel->batch_atomic_limit);
83 intel->in_batch_atomic = FALSE;
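
intel_batch_start_atomic reserves sz dwords up front on the render batch so the following emission cannot be split across a submit, and intel_batch_end_atomic checks that no more than the reserved amount was written. A standalone sketch of just that bookkeeping (the batch switching and flushing done by the real helpers is omitted):

/* Standalone sketch of the atomic-section bookkeeping. */
#include <assert.h>
#include <stdbool.h>

struct sketch_intel {
        bool in_batch_atomic;
        int batch_used;              /* dwords emitted so far */
        int batch_atomic_limit;      /* batch_used may not exceed this */
};

static void sketch_start_atomic(struct sketch_intel *intel, int sz)
{
        assert(!intel->in_batch_atomic);
        /* the real helper ensures sz*4 bytes actually fit before committing */
        intel->in_batch_atomic = true;
        intel->batch_atomic_limit = intel->batch_used + sz;
}

static void sketch_end_atomic(struct sketch_intel *intel)
{
        assert(intel->in_batch_atomic);
        assert(intel->batch_used <= intel->batch_atomic_limit);
        intel->in_batch_atomic = false;
}

int main(void)
{
        struct sketch_intel intel = { 0 };

        sketch_start_atomic(&intel, 8);  /* reserve 8 dwords */
        intel.batch_used += 6;           /* emit 6 of them */
        sketch_end_atomic(&intel);       /* 6 <= 8, the assertions hold */
        return 0;
}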
86 static inline void intel_batch_emit_dword(intel_screen_private *intel, uint32_t dword)
88 intel->batch_ptr[intel->batch_used++] = dword;
91 static inline void intel_batch_align(intel_screen_private *intel, uint32_t align)
98 if ((delta = intel->batch_used & (align - 1))) {
100 memset (intel->batch_ptr + intel->batch_used, 0, 4*delta);
101 intel->batch_used += delta;
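
intel_batch_emit_dword appends one 32-bit word at the batch_used index, and intel_batch_align pads with zero dwords up to the requested alignment; the non-matching lines elided above presumably convert the byte alignment to dwords and compute the padding as align - delta. A standalone sketch of the padding arithmetic, taking the alignment directly in dwords:

/* Standalone sketch of dword emission and alignment padding; buffer bounds
 * checks and the byte-to-dword conversion of the real helper are simplified. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sketch_intel {
        uint32_t batch[256];
        unsigned int batch_used;     /* index of the next free dword */
};

static void sketch_emit_dword(struct sketch_intel *intel, uint32_t dword)
{
        intel->batch[intel->batch_used++] = dword;
}

static void sketch_align(struct sketch_intel *intel, uint32_t align_dwords)
{
        uint32_t delta = intel->batch_used & (align_dwords - 1);

        if (delta) {
                delta = align_dwords - delta;    /* dwords needed to reach alignment */
                memset(&intel->batch[intel->batch_used], 0, 4 * delta);
                intel->batch_used += delta;
        }
}

int main(void)
{
        struct sketch_intel intel = { .batch_used = 0 };

        sketch_emit_dword(&intel, 0x02000000);          /* some command dword */
        sketch_align(&intel, 4);                        /* pad to a 4-dword boundary */
        printf("batch_used = %u\n", intel.batch_used);  /* prints 4 */
        return 0;
}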
106 intel_batch_emit_reloc(intel_screen_private *intel,
114 drm_intel_bo_emit_reloc_fence(intel->batch_bo,
115 intel->batch_used * 4,
119 drm_intel_bo_emit_reloc(intel->batch_bo, intel->batch_used * 4,
125 intel_batch_emit_dword(intel, offset);
126 if (INTEL_INFO(intel)->gen >= 0100)
127 intel_batch_emit_dword(intel, offset >> 32);
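
intel_batch_emit_reloc records a relocation against the target bo at the current byte offset (batch_used * 4), using the fenced libdrm variant when needs_fence is set, and then writes the presumed address into the batch; the octal 0100 means gen8+, where addresses are wider than 32 bits and a second dword carries the high half. A standalone sketch of the address-splitting step only (the libdrm relocation call is just described in a comment):

/* Standalone sketch of emitting a (possibly 64-bit) presumed address into the
 * batch; the relocation bookkeeping done by libdrm is not reproduced here. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t batch[16];
static unsigned int batch_used;

static void emit_dword(uint32_t dword)
{
        batch[batch_used++] = dword;
}

static void emit_reloc_sketch(uint64_t presumed_bo_offset, uint32_t delta, int gen)
{
        uint64_t offset = presumed_bo_offset + delta;

        /* the real helper first calls drm_intel_bo_emit_reloc(_fence)() with the
         * byte offset batch_used * 4 so the kernel can patch this slot later */
        emit_dword((uint32_t)offset);
        if (gen >= 0100)                  /* octal: gen 8.0 and newer */
                emit_dword((uint32_t)(offset >> 32));
}

int main(void)
{
        emit_reloc_sketch(0x100000000ULL, 0x40, 0100);   /* gen8: two dwords emitted */
        printf("lo=0x%08" PRIx32 " hi=0x%08" PRIx32 "\n", batch[0], batch[1]);
        return 0;
}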
131 intel_batch_mark_pixmap_domains(intel_screen_private *intel,
139 list_add(&priv->batch, &intel->batch_pixmaps);
144 intel->needs_flush |= write_domain != 0;
148 intel_batch_emit_reloc_pixmap(intel_screen_private *intel, PixmapPtr pixmap,
154 intel_batch_mark_pixmap_domains(intel, priv, read_domains, write_domain);
156 intel_batch_emit_reloc(intel, priv->bo,
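
intel_batch_mark_pixmap_domains puts the pixmap on the per-batch list and flags a flush whenever it is written; intel_batch_emit_reloc_pixmap then emits the relocation against the pixmap's bo with the same domains. A standalone sketch of the write-tracking idea, with stand-in domain flags and without the list handling:

/* Standalone sketch of the domain bookkeeping: any relocation with a non-zero
 * write domain marks the pixmap dirty and forces a flush before anyone else
 * may rely on the result. The domain values are stand-ins. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_DOMAIN_RENDER  0x2     /* stand-ins for the I915_GEM_DOMAIN_* flags */
#define SKETCH_DOMAIN_SAMPLER 0x10

struct sketch_pixmap { bool dirty; bool busy; };
struct sketch_intel { bool needs_flush; };

static void mark_domains(struct sketch_intel *intel, struct sketch_pixmap *priv,
                         unsigned read_domains, unsigned write_domain)
{
        assert(read_domains);
        assert(write_domain == 0 || write_domain == read_domains);

        priv->dirty |= write_domain != 0;
        priv->busy = true;
        intel->needs_flush |= write_domain != 0;
}

int main(void)
{
        struct sketch_intel intel = { false };
        struct sketch_pixmap src = { false, false }, dst = { false, false };

        mark_domains(&intel, &src, SKETCH_DOMAIN_SAMPLER, 0);                   /* read only */
        mark_domains(&intel, &dst, SKETCH_DOMAIN_RENDER, SKETCH_DOMAIN_RENDER); /* written */
        printf("needs_flush=%d src.dirty=%d dst.dirty=%d\n",
               intel.needs_flush, src.dirty, dst.dirty);   /* 1 0 1 */
        return 0;
}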
161 #define ALIGN_BATCH(align) intel_batch_align(intel, align);
162 #define OUT_BATCH(dword) intel_batch_emit_dword(intel, dword)
165 intel_batch_emit_reloc(intel, bo, read_domains, write_domains, delta, 0)
168 intel_batch_emit_reloc(intel, bo, read_domains, write_domains, delta, 1)
171 intel_batch_emit_reloc_pixmap(intel, pixmap, reads, write, delta, 0)
174 intel_batch_emit_reloc_pixmap(intel, pixmap, reads, write, delta, 1)
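
All of the OUT_* macros expand to calls on a variable literally named intel, so they can only be used inside functions that already have an intel_screen_private *intel in scope. A standalone sketch of that pattern with toy types:

/* Standalone sketch: the macro body names a variable, `intel`, that it expects
 * the caller to have in scope. Toy types, not the driver's. */
#include <stdint.h>
#include <stdio.h>

struct toy_intel { uint32_t batch[8]; unsigned batch_used; };

static void toy_emit_dword(struct toy_intel *intel, uint32_t dword)
{
        intel->batch[intel->batch_used++] = dword;
}

/* like OUT_BATCH: no explicit context argument, `intel` must exist locally */
#define TOY_OUT_BATCH(dword) toy_emit_dword(intel, dword)

static void emit_something(struct toy_intel *intel)
{
        TOY_OUT_BATCH(0xdeadbeef);
        TOY_OUT_BATCH(0x0);
}

int main(void)
{
        struct toy_intel intel = { .batch_used = 0 };

        emit_something(&intel);
        printf("emitted %u dwords\n", intel.batch_used);   /* 2 */
        return 0;
}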
189 if (intel->batch_emitting != 0) \
192 assert(!intel->in_batch_atomic); \
193 if (intel->current_batch != batch_idx) { \
194 if (intel->current_batch && intel->context_switch) \
195 intel->context_switch(intel, batch_idx); \
197 intel_batch_require_space(scrn, intel, (n) * 4); \
198 intel->current_batch = batch_idx; \
199 intel->batch_emitting = (n); \
200 intel->batch_emit_start = intel->batch_used; \
207 if (INTEL_INFO(intel)->gen < 060) { \
218 ((intel->BR_tiling[0] == I915_TILING_Y) ? BCS_SWCTRL_DST_Y : 0) | \
219 ((intel->BR_tiling[1] == I915_TILING_Y) ? BCS_SWCTRL_SRC_Y : 0)); \
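
On gen6+ (the octal 060 check) the tiled-blit variant loads the blitter's BCS_SWCTRL register so that Y-tiled destination and source surfaces (BR_tiling[0] and BR_tiling[1]) are copied correctly. A standalone sketch of folding the two tiling selections into one register value; the bit positions used here are assumptions, not the documented BCS_SWCTRL layout:

/* Standalone sketch of folding the two tiling selections into one register
 * value. Bit positions are illustrative; consult the hardware documentation
 * for the real BCS_SWCTRL layout and its write-mask convention. */
#include <stdint.h>
#include <stdio.h>

#define TILING_NONE 0
#define TILING_Y    2                 /* stand-in for I915_TILING_Y */

#define SWCTRL_DST_Y (1 << 0)         /* assumed bit for a Y-tiled destination */
#define SWCTRL_SRC_Y (1 << 1)         /* assumed bit for a Y-tiled source */

static uint32_t swctrl_value(int dst_tiling, int src_tiling)
{
        return ((dst_tiling == TILING_Y) ? SWCTRL_DST_Y : 0) |
               ((src_tiling == TILING_Y) ? SWCTRL_SRC_Y : 0);
}

int main(void)
{
        printf("Y->linear copy: 0x%x\n", swctrl_value(TILING_NONE, TILING_Y)); /* 0x2 */
        printf("Y->Y copy:      0x%x\n", swctrl_value(TILING_Y, TILING_Y));    /* 0x3 */
        return 0;
}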
224 if (intel->batch_emitting == 0) \
227 if (intel->batch_used > \
228 intel->batch_emit_start + intel->batch_emitting) \
231 intel->batch_used - intel->batch_emit_start, \
232 intel->batch_emitting); \
233 if (intel->batch_used < intel->batch_emit_start + \
234 intel->batch_emitting) \
237 intel->batch_used - intel->batch_emit_start, \
238 intel->batch_emitting); \
239 intel->batch_emitting = 0; \
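
BEGIN_BATCH records where the run started (batch_emit_start) and how many dwords were promised (batch_emitting); ADVANCE_BATCH then reports an overrun or underrun if the emitted count differs from the declared one before marking the run closed. A standalone sketch of that check:

/* Standalone sketch of the BEGIN_BATCH/ADVANCE_BATCH bookkeeping: declare a
 * dword count up front, then verify the count that was actually emitted. */
#include <stdio.h>

struct sketch_intel {
        unsigned batch_used;          /* dwords emitted so far */
        unsigned batch_emit_start;    /* batch_used when BEGIN was issued */
        unsigned batch_emitting;      /* dwords promised by BEGIN, 0 = closed */
};

static void sketch_begin(struct sketch_intel *intel, unsigned n)
{
        if (intel->batch_emitting != 0)
                fprintf(stderr, "BEGIN without a closing ADVANCE\n");
        intel->batch_emitting = n;
        intel->batch_emit_start = intel->batch_used;
}

static void sketch_advance(struct sketch_intel *intel)
{
        unsigned emitted = intel->batch_used - intel->batch_emit_start;

        if (intel->batch_emitting == 0)
                fprintf(stderr, "ADVANCE without a BEGIN\n");
        else if (emitted > intel->batch_emitting)
                fprintf(stderr, "overrun: emitted %u, declared %u\n",
                        emitted, intel->batch_emitting);
        else if (emitted < intel->batch_emitting)
                fprintf(stderr, "underrun: emitted %u, declared %u\n",
                        emitted, intel->batch_emitting);
        intel->batch_emitting = 0;
}

int main(void)
{
        struct sketch_intel intel = { 0, 0, 0 };

        sketch_begin(&intel, 4);
        intel.batch_used += 3;        /* emit only 3 of the 4 declared dwords */
        sketch_advance(&intel);       /* reports an underrun */
        return 0;
}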
242 void intel_next_vertex(intel_screen_private *intel);
243 static inline void intel_vertex_emit(intel_screen_private *intel, float v)
245 intel->vertex_ptr[intel->vertex_used++] = v;
247 #define OUT_VERTEX(v) intel_vertex_emit(intel, v)
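
The vertex path mirrors the batch path: intel_vertex_emit appends one float to the mapped vertex bo and OUT_VERTEX is the shorthand used by the 3D emission code, with intel_next_vertex grabbing a fresh buffer when intel_vertex_space runs low. A standalone sketch emitting one x, y, u, v vertex; the four-float layout is illustrative:

/* Standalone sketch of float-by-float vertex emission; the real pointer
 * targets a mapped vertex bo and the layout depends on the 3D setup. */
#include <stdio.h>

struct sketch_intel {
        float vertex[64];
        unsigned vertex_used;         /* floats written so far */
};

static void sketch_vertex_emit(struct sketch_intel *intel, float v)
{
        intel->vertex[intel->vertex_used++] = v;
}

#define SK_OUT_VERTEX(v) sketch_vertex_emit(intel, v)

static void emit_vertex(struct sketch_intel *intel,
                        float x, float y, float u, float v)
{
        SK_OUT_VERTEX(x);             /* screen position */
        SK_OUT_VERTEX(y);
        SK_OUT_VERTEX(u);             /* texture coordinate */
        SK_OUT_VERTEX(v);
}

int main(void)
{
        struct sketch_intel intel = { .vertex_used = 0 };

        emit_vertex(&intel, 10.0f, 20.0f, 0.0f, 1.0f);
        printf("floats used: %u\n", intel.vertex_used);   /* 4 */
        return 0;
}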