freedreno_context.h revision 01e04c3f
/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_CONTEXT_H_
#define FREEDRENO_CONTEXT_H_

#include "pipe/p_context.h"
#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/list.h"
#include "util/slab.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_screen.h"
#include "freedreno_gmem.h"
#include "freedreno_util.h"

#define BORDER_COLOR_UPLOAD_SIZE (2 * PIPE_MAX_SAMPLERS * BORDERCOLOR_SIZE)

struct fd_vertex_stateobj;

struct fd_texture_stateobj {
	struct pipe_sampler_view *textures[PIPE_MAX_SAMPLERS];
	unsigned num_textures;
	unsigned valid_textures;
	struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];
	unsigned num_samplers;
	unsigned valid_samplers;
	/* number of samples per sampler, 2 bits per sampler: */
	uint32_t samples;
};

struct fd_program_stateobj {
	void *vp, *fp;

	/* rest only used by fd2.. split out: */
	uint8_t num_exports;
	/* Indexed by semantic name or TGSI_SEMANTIC_COUNT + semantic index
	 * for TGSI_SEMANTIC_GENERIC.  Special vs exports (position and
	 * point-size) are not included in this.
	 */
	uint8_t export_linkage[63];
};

struct fd_constbuf_stateobj {
	struct pipe_constant_buffer cb[PIPE_MAX_CONSTANT_BUFFERS];
	uint32_t enabled_mask;
};

struct fd_shaderbuf_stateobj {
	struct pipe_shader_buffer sb[PIPE_MAX_SHADER_BUFFERS];
	uint32_t enabled_mask;
};

struct fd_shaderimg_stateobj {
	struct pipe_image_view si[PIPE_MAX_SHADER_IMAGES];
	uint32_t enabled_mask;
};

struct fd_vertexbuf_stateobj {
	struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
	unsigned count;
	uint32_t enabled_mask;
};

struct fd_vertex_stateobj {
	struct pipe_vertex_element pipe[PIPE_MAX_ATTRIBS];
	unsigned num_elements;
};

struct fd_streamout_stateobj {
	struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
	unsigned num_targets;
	/* Track offset from vtxcnt for streamout data.  This counter
	 * is just incremented by # of vertices on each draw until
	 * reset or a new streamout buffer is bound.
	 *
	 * When we eventually have GS, the CPU won't actually know the
	 * number of vertices per draw, so I think we'll have to do
	 * something more clever.
	 */
	unsigned offsets[PIPE_MAX_SO_BUFFERS];
};
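
/* Illustrative sketch (not part of this header): the offsets above are
 * expected to advance by the vertex count of each draw.  A hypothetical
 * helper called from the draw path might look like this:
 */
#if 0
static inline void
fd_streamout_advance(struct fd_streamout_stateobj *so, unsigned vtxcnt)
{
	/* bump each bound target's offset by the # of vertices drawn: */
	for (unsigned i = 0; i < so->num_targets; i++)
		so->offsets[i] += vtxcnt;
}
#endif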

#define MAX_GLOBAL_BUFFERS 16
struct fd_global_bindings_stateobj {
	struct pipe_resource *buf[MAX_GLOBAL_BUFFERS];
	uint32_t enabled_mask;
};

/* group together the vertex and vertexbuf state.. for ease of passing
 * around, and because various internal operations (gmem<->mem, etc)
 * need their own vertex state:
 */
struct fd_vertex_state {
	struct fd_vertex_stateobj *vtx;
	struct fd_vertexbuf_stateobj vertexbuf;
};

/* global 3d pipeline dirty state: */
enum fd_dirty_3d_state {
	FD_DIRTY_BLEND       = BIT(0),
	FD_DIRTY_RASTERIZER  = BIT(1),
	FD_DIRTY_ZSA         = BIT(2),
	FD_DIRTY_BLEND_COLOR = BIT(3),
	FD_DIRTY_STENCIL_REF = BIT(4),
	FD_DIRTY_SAMPLE_MASK = BIT(5),
	FD_DIRTY_FRAMEBUFFER = BIT(6),
	FD_DIRTY_STIPPLE     = BIT(7),
	FD_DIRTY_VIEWPORT    = BIT(8),
	FD_DIRTY_VTXSTATE    = BIT(9),
	FD_DIRTY_VTXBUF      = BIT(10),

	FD_DIRTY_SCISSOR     = BIT(12),
	FD_DIRTY_STREAMOUT   = BIT(13),
	FD_DIRTY_UCP         = BIT(14),
	FD_DIRTY_BLEND_DUAL  = BIT(15),

	/* These are a bit redundant with fd_dirty_shader_state, and possibly
	 * should be removed.  (But OTOH kinda convenient in some places.)
	 */
	FD_DIRTY_PROG        = BIT(16),
	FD_DIRTY_CONST       = BIT(17),
	FD_DIRTY_TEX         = BIT(18),

	/* only used by a2xx.. possibly can be removed.. */
	FD_DIRTY_TEXSTATE    = BIT(19),
};

/* per shader-stage dirty state: */
enum fd_dirty_shader_state {
	FD_DIRTY_SHADER_PROG  = BIT(0),
	FD_DIRTY_SHADER_CONST = BIT(1),
	FD_DIRTY_SHADER_TEX   = BIT(2),
	FD_DIRTY_SHADER_SSBO  = BIT(3),
	FD_DIRTY_SHADER_IMAGE = BIT(4),
};
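
/* Illustrative sketch (hypothetical helper, not part of this header):
 * because the global bits are redundant with the per-stage ones, a state
 * binding entrypoint is expected to set both, e.g. when new constants
 * are bound:
 */
#if 0
static inline void
fd_mark_const_dirty(struct fd_context *ctx, enum pipe_shader_type shader)
{
	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_CONST;  /* per-stage bit */
	ctx->dirty |= FD_DIRTY_CONST;                        /* global bit */
}
#endif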

struct fd_context {
	struct pipe_context base;

	struct fd_device *dev;
	struct fd_screen *screen;
	struct fd_pipe *pipe;

	struct util_queue flush_queue;

	struct blitter_context *blitter;
	void *clear_rs_state;
	struct primconvert_context *primconvert;

	/* slab for pipe_transfer allocations: */
	struct slab_child_pool transfer_pool;

	/**
	 * query related state:
	 */
	/*@{*/
	/* slabs for fd_hw_sample and fd_hw_sample_period allocations: */
	struct slab_mempool sample_pool;
	struct slab_mempool sample_period_pool;

	/* sample-providers for hw queries: */
	const struct fd_hw_sample_provider *hw_sample_providers[MAX_HW_SAMPLE_PROVIDERS];

	/* list of active queries: */
	struct list_head hw_active_queries;

	/* sample-providers for accumulating hw queries: */
	const struct fd_acc_sample_provider *acc_sample_providers[MAX_HW_SAMPLE_PROVIDERS];

	/* list of active accumulating queries: */
	struct list_head acc_active_queries;
	/*@}*/

	/* table with PIPE_PRIM_MAX entries mapping PIPE_PRIM_x to
	 * DI_PT_x value to use for draw initiator.  There are some
	 * slight differences between generations:
	 */
	const uint8_t *primtypes;
	uint32_t primtype_mask;

	/* shaders used by clear, and gmem->mem blits: */
	struct fd_program_stateobj solid_prog; // TODO move to screen?

	/* shaders used by mem->gmem blits: */
	struct fd_program_stateobj blit_prog[MAX_RENDER_TARGETS]; // TODO move to screen?
	struct fd_program_stateobj blit_z, blit_zs;

	/* Stats/counters: */
	struct {
		uint64_t prims_emitted;
		uint64_t prims_generated;
		uint64_t draw_calls;
		uint64_t batch_total, batch_sysmem, batch_gmem, batch_nondraw, batch_restore;
		uint64_t staging_uploads, shadow_uploads;
		uint64_t vs_regs, fs_regs;
	} stats;

	/* Current batch.. the rule here is that you can deref ctx->batch
	 * in codepaths from pipe_context entrypoints.  But not in code-
	 * paths from fd_batch_flush() (basically, the stuff that gets
	 * called from GMEM code), since in those code-paths the batch
	 * you care about is not necessarily the same as ctx->batch.
	 */
	struct fd_batch *batch;

	/* NULL if there has been rendering since last flush.  Otherwise
	 * keeps a reference to the last fence so we can re-use it rather
	 * than having to flush a no-op batch.
	 */
	struct pipe_fence_handle *last_fence;

	/* Are we in the process of shadowing a resource?  Used to detect
	 * recursion in transfer_map, and skip unneeded synchronization.
	 */
	bool in_shadow : 1;

	/* Ie. in a blit situation where we no longer care about previous
	 * framebuffer contents.  Main point is to eliminate blits from
	 * fd_try_shadow_resource().  For example, in case of texture
	 * upload + gen-mipmaps.
	 */
	bool in_blit : 1;

	struct pipe_scissor_state scissor;

	/* we don't have a disable/enable bit for scissor, so instead we keep
	 * a disabled-scissor state which matches the entire bound framebuffer
	 * and use that when scissor is not enabled.
	 */
	struct pipe_scissor_state disabled_scissor;

	/* Current gmem/tiling configuration.. gets updated on render_tiles()
	 * if out of date with current maximal-scissor/cpp:
	 *
	 * (NOTE: this is kind of related to the batch, but moving it there
	 * means we'd always have to recalc tiles every batch)
	 */
	struct fd_gmem_stateobj gmem;
	struct fd_vsc_pipe vsc_pipe[32];
	struct fd_tile tile[512];

	/* which state objects need to be re-emit'd: */
	enum fd_dirty_3d_state dirty;

	/* per shader-stage dirty status: */
	enum fd_dirty_shader_state dirty_shader[PIPE_SHADER_TYPES];

	void *compute;
	struct pipe_blend_state *blend;
	struct pipe_rasterizer_state *rasterizer;
	struct pipe_depth_stencil_alpha_state *zsa;

	struct fd_texture_stateobj tex[PIPE_SHADER_TYPES];

	struct fd_program_stateobj prog;

	struct fd_vertex_state vtx;

	struct pipe_blend_color blend_color;
	struct pipe_stencil_ref stencil_ref;
	unsigned sample_mask;
	/* local context fb state, for when ctx->batch is null: */
	struct pipe_framebuffer_state framebuffer;
	struct pipe_poly_stipple stipple;
	struct pipe_viewport_state viewport;
	struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
	struct fd_shaderbuf_stateobj shaderbuf[PIPE_SHADER_TYPES];
	struct fd_shaderimg_stateobj shaderimg[PIPE_SHADER_TYPES];
	struct fd_streamout_stateobj streamout;
	struct fd_global_bindings_stateobj global_bindings;
	struct pipe_clip_state ucp;

	struct pipe_query *cond_query;
	bool cond_cond; /* inverted rendering condition */
	uint cond_mode;

	struct pipe_debug_callback debug;

	/* GMEM/tile handling fxns: */
	void (*emit_tile_init)(struct fd_batch *batch);
	void (*emit_tile_prep)(struct fd_batch *batch, struct fd_tile *tile);
	void (*emit_tile_mem2gmem)(struct fd_batch *batch, struct fd_tile *tile);
	void (*emit_tile_renderprep)(struct fd_batch *batch, struct fd_tile *tile);
	void (*emit_tile_gmem2mem)(struct fd_batch *batch, struct fd_tile *tile);
	void (*emit_tile_fini)(struct fd_batch *batch); /* optional */
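
	/* A sketch of the expected flow: the tile callbacks above are
	 * invoked from the gmem rendering code roughly in this order
	 * (see freedreno_gmem.c for the real thing):
	 *
	 *   emit_tile_init(batch)
	 *   for each tile:
	 *     emit_tile_prep(batch, tile)
	 *     emit_tile_mem2gmem(batch, tile)    // restore, if needed
	 *     emit_tile_renderprep(batch, tile)
	 *     .. execute draw cmdstream ..
	 *     emit_tile_gmem2mem(batch, tile)    // resolve to system memory
	 *   emit_tile_fini(batch)                // if implemented
	 */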

	/* optional, for GMEM bypass: */
	void (*emit_sysmem_prep)(struct fd_batch *batch);
	void (*emit_sysmem_fini)(struct fd_batch *batch);

	/* draw: */
	bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info,
			unsigned index_offset);
	bool (*clear)(struct fd_context *ctx, unsigned buffers,
			const union pipe_color_union *color, double depth, unsigned stencil);

	/* compute: */
	void (*launch_grid)(struct fd_context *ctx, const struct pipe_grid_info *info);

	/* constant emit:  (note currently not used/needed for a2xx) */
	void (*emit_const)(struct fd_ringbuffer *ring, enum shader_t type,
			uint32_t regid, uint32_t offset, uint32_t sizedwords,
			const uint32_t *dwords, struct pipe_resource *prsc);
	/* emit bo addresses as constant: */
	void (*emit_const_bo)(struct fd_ringbuffer *ring, enum shader_t type, boolean write,
			uint32_t regid, uint32_t num, struct pipe_resource **prscs, uint32_t *offsets);

	/* indirect-branch emit: */
	void (*emit_ib)(struct fd_ringbuffer *ring, struct fd_ringbuffer *target);

	/* query: */
	struct fd_query * (*create_query)(struct fd_context *ctx, unsigned query_type);
	void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles);
	void (*query_prepare_tile)(struct fd_batch *batch, uint32_t n,
			struct fd_ringbuffer *ring);
	void (*query_set_stage)(struct fd_batch *batch, enum fd_render_stage stage);

	/* blitter: */
	void (*blit)(struct fd_context *ctx, const struct pipe_blit_info *info);

	/* simple gpu "memcpy": */
	void (*mem_to_mem)(struct fd_ringbuffer *ring, struct pipe_resource *dst,
			unsigned dst_off, struct pipe_resource *src, unsigned src_off,
			unsigned sizedwords);

	/*
	 * Common pre-cooked VBO state (used for a3xx and later):
	 */

	/* for clear/gmem->mem vertices, and mem->gmem */
	struct pipe_resource *solid_vbuf;

	/* for mem->gmem tex coords: */
	struct pipe_resource *blit_texcoord_vbuf;

	/* vertex state for solid_vbuf:
	 *    - solid_vbuf / 12 / R32G32B32_FLOAT
	 */
	struct fd_vertex_state solid_vbuf_state;

	/* vertex state for blit_prog:
	 *    - blit_texcoord_vbuf / 8 / R32G32_FLOAT
	 *    - solid_vbuf / 12 / R32G32B32_FLOAT
	 */
	struct fd_vertex_state blit_vbuf_state;
};
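
/* Illustrative sketch (hypothetical fdN_* names, not declared anywhere):
 * each generation's context-create function is what fills in the
 * callbacks above before handing off to fd_context_init():
 */
#if 0
struct pipe_context *
fdN_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
	struct fd_context *ctx = CALLOC_STRUCT(fd_context);

	/* wire up per-generation backend hooks: */
	ctx->emit_tile_init = fdN_emit_tile_init;
	ctx->emit_tile_prep = fdN_emit_tile_prep;
	ctx->emit_tile_mem2gmem = fdN_emit_tile_mem2gmem;
	ctx->emit_tile_renderprep = fdN_emit_tile_renderprep;
	ctx->emit_tile_gmem2mem = fdN_emit_tile_gmem2mem;
	ctx->draw_vbo = fdN_draw_vbo;
	ctx->clear = fdN_clear;

	return fd_context_init(ctx, pscreen, fdN_primtypes, priv, flags);
}
#endif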

static inline struct fd_context *
fd_context(struct pipe_context *pctx)
{
	return (struct fd_context *)pctx;
}

static inline void
fd_context_assert_locked(struct fd_context *ctx)
{
	pipe_mutex_assert_locked(ctx->screen->lock);
}

static inline void
fd_context_lock(struct fd_context *ctx)
{
	mtx_lock(&ctx->screen->lock);
}

static inline void
fd_context_unlock(struct fd_context *ctx)
{
	mtx_unlock(&ctx->screen->lock);
}

/* mark all state dirty: */
static inline void
fd_context_all_dirty(struct fd_context *ctx)
{
	ctx->dirty = ~0;
	for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++)
		ctx->dirty_shader[i] = ~0;
}

static inline void
fd_context_all_clean(struct fd_context *ctx)
{
	ctx->dirty = 0;
	for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++) {
		/* don't mark compute state as clean, since it is not emitted
		 * during normal draw calls.  For the places that call
		 * _all_dirty(), it is safe to mark compute state dirty as
		 * well, but the inverse is not true.
		 */
		if (i == PIPE_SHADER_COMPUTE)
			continue;
		ctx->dirty_shader[i] = 0;
	}
}

static inline struct pipe_scissor_state *
fd_context_get_scissor(struct fd_context *ctx)
{
	if (ctx->rasterizer && ctx->rasterizer->scissor)
		return &ctx->scissor;
	return &ctx->disabled_scissor;
}

static inline bool
fd_supported_prim(struct fd_context *ctx, unsigned prim)
{
	return (1 << prim) & ctx->primtype_mask;
}

static inline struct fd_batch *
fd_context_batch(struct fd_context *ctx)
{
	if (unlikely(!ctx->batch)) {
		struct fd_batch *batch =
			fd_batch_from_fb(&ctx->screen->batch_cache, ctx, &ctx->framebuffer);
		util_copy_framebuffer_state(&batch->framebuffer, &ctx->framebuffer);
		ctx->batch = batch;
		fd_context_all_dirty(ctx);
	}
	return ctx->batch;
}

static inline void
fd_batch_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
{
	struct fd_context *ctx = batch->ctx;

	/* special case: internal blits (like mipmap level generation)
	 * go through the normal draw path (via util_blitter_blit()).. but
	 * we need to ignore the FD_STAGE_DRAW which will be set, so we
	 * don't enable queries which should be paused during internal
	 * blits:
	 */
	if ((batch->stage == FD_STAGE_BLIT) &&
			(stage != FD_STAGE_NULL))
		return;

	if (ctx->query_set_stage)
		ctx->query_set_stage(batch, stage);

	batch->stage = stage;
}

void fd_context_setup_common_vbos(struct fd_context *ctx);
void fd_context_cleanup_common_vbos(struct fd_context *ctx);

struct pipe_context * fd_context_init(struct fd_context *ctx,
		struct pipe_screen *pscreen, const uint8_t *primtypes,
		void *priv, unsigned flags);

void fd_context_destroy(struct pipe_context *pctx);

#endif /* FREEDRENO_CONTEXT_H_ */