/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_CONTEXT_H_
#define FREEDRENO_CONTEXT_H_

#include "pipe/p_context.h"
#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/list.h"
#include "util/slab.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_screen.h"
#include "freedreno_gmem.h"
#include "freedreno_util.h"

#define BORDER_COLOR_UPLOAD_SIZE (2 * PIPE_MAX_SAMPLERS * BORDERCOLOR_SIZE)

struct fd_vertex_stateobj;

struct fd_texture_stateobj {
	struct pipe_sampler_view *textures[PIPE_MAX_SAMPLERS];
	unsigned num_textures;
	unsigned valid_textures;
	struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];
	unsigned num_samplers;
	unsigned valid_samplers;
	/* number of samples per sampler, 2 bits per sampler: */
	uint32_t samples;
};

struct fd_program_stateobj {
	void *vp, *fp;
};

struct fd_constbuf_stateobj {
	struct pipe_constant_buffer cb[PIPE_MAX_CONSTANT_BUFFERS];
	uint32_t enabled_mask;
};

struct fd_shaderbuf_stateobj {
	struct pipe_shader_buffer sb[PIPE_MAX_SHADER_BUFFERS];
	uint32_t enabled_mask;
};

struct fd_shaderimg_stateobj {
	struct pipe_image_view si[PIPE_MAX_SHADER_IMAGES];
	uint32_t enabled_mask;
};

struct fd_vertexbuf_stateobj {
	struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
	unsigned count;
	uint32_t enabled_mask;
};

struct fd_vertex_stateobj {
	struct pipe_vertex_element pipe[PIPE_MAX_ATTRIBS];
	unsigned num_elements;
};

struct fd_streamout_stateobj {
	struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
	unsigned num_targets;
	/* Track offset from vtxcnt for streamout data.  This counter
	 * is just incremented by # of vertices on each draw until
	 * reset or a new streamout buffer is bound.
	 *
	 * When we eventually have GS, the CPU won't actually know the
	 * number of vertices per draw, so I think we'll have to do
	 * something more clever.
	 */
	unsigned offsets[PIPE_MAX_SO_BUFFERS];
};

#define MAX_GLOBAL_BUFFERS 16
struct fd_global_bindings_stateobj {
	struct pipe_resource *buf[MAX_GLOBAL_BUFFERS];
	uint32_t enabled_mask;
};
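
/* Note: the enabled_mask fields in the stateobjs above track which slots
 * are currently bound, one bit per slot, so emit paths can walk only the
 * set bits instead of all slots.  A minimal sketch (illustrative only,
 * not actual driver code) of the bookkeeping a bind helper would do:
 *
 *    if (cb) {
 *       so->cb[slot] = *cb;               // real code must also take a
 *       so->enabled_mask |= (1 << slot);  // pipe_resource reference
 *    } else {
 *       so->enabled_mask &= ~(1 << slot);
 *    }
 */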

/* group together the vertex and vertexbuf state.. for ease of passing
 * around, and because various internal operations (gmem<->mem, etc)
 * need their own vertex state:
 */
struct fd_vertex_state {
	struct fd_vertex_stateobj *vtx;
	struct fd_vertexbuf_stateobj vertexbuf;
};

/* global 3d pipeline dirty state: */
enum fd_dirty_3d_state {
	FD_DIRTY_BLEND       = BIT(0),
	FD_DIRTY_RASTERIZER  = BIT(1),
	FD_DIRTY_ZSA         = BIT(2),
	FD_DIRTY_BLEND_COLOR = BIT(3),
	FD_DIRTY_STENCIL_REF = BIT(4),
	FD_DIRTY_SAMPLE_MASK = BIT(5),
	FD_DIRTY_FRAMEBUFFER = BIT(6),
	FD_DIRTY_STIPPLE     = BIT(7),
	FD_DIRTY_VIEWPORT    = BIT(8),
	FD_DIRTY_VTXSTATE    = BIT(9),
	FD_DIRTY_VTXBUF      = BIT(10),
	FD_DIRTY_MIN_SAMPLES = BIT(11),

	FD_DIRTY_SCISSOR     = BIT(12),
	FD_DIRTY_STREAMOUT   = BIT(13),
	FD_DIRTY_UCP         = BIT(14),
	FD_DIRTY_BLEND_DUAL  = BIT(15),

	/* These are a bit redundant with fd_dirty_shader_state, and possibly
	 * should be removed.  (But OTOH kinda convenient in some places)
	 */
	FD_DIRTY_PROG        = BIT(16),
	FD_DIRTY_CONST       = BIT(17),
	FD_DIRTY_TEX         = BIT(18),

	/* only used by a2xx.. possibly can be removed.. */
	FD_DIRTY_TEXSTATE    = BIT(19),
};

/* per shader-stage dirty state: */
enum fd_dirty_shader_state {
	FD_DIRTY_SHADER_PROG  = BIT(0),
	FD_DIRTY_SHADER_CONST = BIT(1),
	FD_DIRTY_SHADER_TEX   = BIT(2),
	FD_DIRTY_SHADER_SSBO  = BIT(3),
	FD_DIRTY_SHADER_IMAGE = BIT(4),
};
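
/* The two dirty levels work together: for example (illustrative sketch,
 * not the exact driver code), a pipe_context entrypoint binding fragment
 * sampler views would set both the per-stage bit and the redundant
 * global convenience bit:
 *
 *    ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= FD_DIRTY_SHADER_TEX;
 *    ctx->dirty |= FD_DIRTY_TEX;
 *
 * and per-generation emit code tests these bits to decide which state
 * groups need to be re-emitted.
 */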

struct fd_context {
	struct pipe_context base;

	struct fd_device *dev;
	struct fd_screen *screen;
	struct fd_pipe *pipe;

	struct util_queue flush_queue;

	struct blitter_context *blitter;
	void *clear_rs_state;
	struct primconvert_context *primconvert;

	/* slab for pipe_transfer allocations: */
	struct slab_child_pool transfer_pool;

	/**
	 * query related state:
	 */
	/*@{*/
	/* slabs for fd_hw_sample and fd_hw_sample_period allocations: */
	struct slab_mempool sample_pool;
	struct slab_mempool sample_period_pool;

	/* sample-providers for hw queries: */
	const struct fd_hw_sample_provider *hw_sample_providers[MAX_HW_SAMPLE_PROVIDERS];

	/* list of active queries: */
	struct list_head hw_active_queries;

	/* sample-providers for accumulating hw queries: */
	const struct fd_acc_sample_provider *acc_sample_providers[MAX_HW_SAMPLE_PROVIDERS];

	/* list of active accumulating queries: */
	struct list_head acc_active_queries;
	/*@}*/

	/* table with PIPE_PRIM_MAX entries mapping PIPE_PRIM_x to
	 * DI_PT_x value to use for draw initiator.  There are some
	 * slight differences between generations:
	 */
	const uint8_t *primtypes;
	uint32_t primtype_mask;

	/* shaders used by clear, and gmem->mem blits: */
	struct fd_program_stateobj solid_prog; // TODO move to screen?

	/* shaders used by mem->gmem blits: */
	struct fd_program_stateobj blit_prog[MAX_RENDER_TARGETS]; // TODO move to screen?
	struct fd_program_stateobj blit_z, blit_zs;

	/* Stats/counters:
	 */
	struct {
		uint64_t prims_emitted;
		uint64_t prims_generated;
		uint64_t draw_calls;
		uint64_t batch_total, batch_sysmem, batch_gmem, batch_nondraw, batch_restore;
		uint64_t staging_uploads, shadow_uploads;
		uint64_t vs_regs, fs_regs;
	} stats;

	/* Current batch.. the rule here is that you can deref ctx->batch
	 * in codepaths from pipe_context entrypoints.  But not in codepaths
	 * from fd_batch_flush() (basically, the stuff that gets called from
	 * GMEM code), since in those codepaths the batch you care about is
	 * not necessarily the same as ctx->batch.
	 */
	struct fd_batch *batch;

	/* NULL if there has been rendering since last flush.  Otherwise
	 * keeps a reference to the last fence so we can re-use it rather
	 * than having to flush a no-op batch.
	 */
	struct pipe_fence_handle *last_fence;

	/* track last known reset status globally and per-context to
	 * determine if more resets have occurred since then.  If the global
	 * reset count increases, it means some other context crashed.  If
	 * the per-context reset count increases, it means we crashed the
	 * gpu.
	 */
	uint32_t context_reset_count, global_reset_count;

	/* Are we in the process of shadowing a resource?  Used to detect
	 * recursion in transfer_map, and skip unneeded synchronization.
	 */
	bool in_shadow : 1;

	/* Ie. in a blit situation where we no longer care about previous
	 * framebuffer contents.  Main point is to eliminate blits from
	 * fd_try_shadow_resource().  For example, in the case of texture
	 * upload + gen-mipmaps.
	 */
	bool in_blit : 1;

	struct pipe_scissor_state scissor;

	/* we don't have a disable/enable bit for scissor, so instead we keep
	 * a disabled-scissor state which matches the entire bound framebuffer
	 * and use that when scissor is not enabled.
	 */
	struct pipe_scissor_state disabled_scissor;

	/* Current gmem/tiling configuration.. gets updated on render_tiles()
	 * if out of date with current maximal-scissor/cpp:
	 *
	 * (NOTE: this is kind of related to the batch, but moving it there
	 * means we'd always have to recalc tiles every batch)
	 */
	struct fd_gmem_stateobj gmem;
	struct fd_vsc_pipe vsc_pipe[32];
	struct fd_tile tile[512];

	/* which state objects need to be re-emit'd: */
	enum fd_dirty_3d_state dirty;

	/* per shader-stage dirty status: */
	enum fd_dirty_shader_state dirty_shader[PIPE_SHADER_TYPES];

	void *compute;
	struct pipe_blend_state *blend;
	struct pipe_rasterizer_state *rasterizer;
	struct pipe_depth_stencil_alpha_state *zsa;

	struct fd_texture_stateobj tex[PIPE_SHADER_TYPES];

	struct fd_program_stateobj prog;

	struct fd_vertex_state vtx;

	struct pipe_blend_color blend_color;
	struct pipe_stencil_ref stencil_ref;
	unsigned sample_mask;
	unsigned min_samples;
	/* local context fb state, for when ctx->batch is null: */
	struct pipe_framebuffer_state framebuffer;
	struct pipe_poly_stipple stipple;
	struct pipe_viewport_state viewport;
	struct pipe_scissor_state viewport_scissor;
	struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
	struct fd_shaderbuf_stateobj shaderbuf[PIPE_SHADER_TYPES];
	struct fd_shaderimg_stateobj shaderimg[PIPE_SHADER_TYPES];
	struct fd_streamout_stateobj streamout;
	struct fd_global_bindings_stateobj global_bindings;
	struct pipe_clip_state ucp;

	struct pipe_query *cond_query;
	bool cond_cond; /* inverted rendering condition */
	uint cond_mode;

	struct pipe_debug_callback debug;

	/* GMEM/tile handling fxns: */
	void (*emit_tile_init)(struct fd_batch *batch);
	void (*emit_tile_prep)(struct fd_batch *batch, struct fd_tile *tile);
	void (*emit_tile_mem2gmem)(struct fd_batch *batch, struct fd_tile *tile);
	void (*emit_tile_renderprep)(struct fd_batch *batch, struct fd_tile *tile);
	void (*emit_tile_gmem2mem)(struct fd_batch *batch, struct fd_tile *tile);
	void (*emit_tile_fini)(struct fd_batch *batch); /* optional */

	/* optional, for GMEM bypass: */
	void (*emit_sysmem_prep)(struct fd_batch *batch);
	void (*emit_sysmem_fini)(struct fd_batch *batch);

	/* draw: */
	bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info,
			unsigned index_offset);
	bool (*clear)(struct fd_context *ctx, unsigned buffers,
			const union pipe_color_union *color, double depth, unsigned stencil);

	/* compute: */
	void (*launch_grid)(struct fd_context *ctx, const struct pipe_grid_info *info);

	/* constant emit:  (note currently not used/needed for a2xx) */
	void (*emit_const)(struct fd_ringbuffer *ring, gl_shader_stage type,
			uint32_t regid, uint32_t offset, uint32_t sizedwords,
			const uint32_t *dwords, struct pipe_resource *prsc);
	/* emit bo addresses as constant: */
	void (*emit_const_bo)(struct fd_ringbuffer *ring, gl_shader_stage type, boolean write,
			uint32_t regid, uint32_t num, struct pipe_resource **prscs, uint32_t *offsets);

	/* indirect-branch emit: */
	void (*emit_ib)(struct fd_ringbuffer *ring, struct fd_ringbuffer *target);

	/* query: */
	struct fd_query * (*create_query)(struct fd_context *ctx, unsigned query_type);
	void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles);
	void (*query_prepare_tile)(struct fd_batch *batch, uint32_t n,
			struct fd_ringbuffer *ring);
	void (*query_set_stage)(struct fd_batch *batch, enum fd_render_stage stage);

	/* blitter: */
	bool (*blit)(struct fd_context *ctx, const struct pipe_blit_info *info);

	/* simple gpu "memcpy": */
	void (*mem_to_mem)(struct fd_ringbuffer *ring, struct pipe_resource *dst,
			unsigned dst_off, struct pipe_resource *src, unsigned src_off,
			unsigned sizedwords);

	/* handling for barriers: */
	void (*framebuffer_barrier)(struct fd_context *ctx);

	/*
	 * Common pre-cooked VBO state (used for a3xx and later):
	 */

	/* for clear/gmem->mem vertices, and mem->gmem */
	struct pipe_resource *solid_vbuf;

	/* for mem->gmem tex coords: */
	struct pipe_resource *blit_texcoord_vbuf;

	/* vertex state for solid_vbuf:
	 *    - solid_vbuf / 12 / R32G32B32_FLOAT
	 */
	struct fd_vertex_state solid_vbuf_state;

	/* vertex state for blit_prog:
	 *    - blit_texcoord_vbuf / 8 / R32G32_FLOAT
	 *    - solid_vbuf / 12 / R32G32B32_FLOAT
	 */
	struct fd_vertex_state blit_vbuf_state;
};
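
/* The function pointers in fd_context form a per-generation backend
 * vtable: each generation's context-create wires in its own
 * implementations so the core driver stays generation-agnostic.
 * Roughly (sketch; fd4_* names assumed from the a4xx backend):
 *
 *    ctx->emit_tile_init = fd4_emit_tile_init;
 *    ctx->draw_vbo = fd4_draw_vbo;
 *    ...
 */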

static inline struct fd_context *
fd_context(struct pipe_context *pctx)
{
	return (struct fd_context *)pctx;
}

static inline void
fd_context_assert_locked(struct fd_context *ctx)
{
	pipe_mutex_assert_locked(ctx->screen->lock);
}

static inline void
fd_context_lock(struct fd_context *ctx)
{
	mtx_lock(&ctx->screen->lock);
}

static inline void
fd_context_unlock(struct fd_context *ctx)
{
	mtx_unlock(&ctx->screen->lock);
}

/* mark all state dirty: */
static inline void
fd_context_all_dirty(struct fd_context *ctx)
{
	ctx->dirty = ~0;
	for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++)
		ctx->dirty_shader[i] = ~0;
}

static inline void
fd_context_all_clean(struct fd_context *ctx)
{
	ctx->dirty = 0;
	for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++) {
		/* don't mark compute state as clean, since it is not emitted
		 * during a normal draw call.  In the places that call
		 * _all_dirty(), it is safe to mark compute state dirty as
		 * well, but the inverse is not true.
		 */
		if (i == PIPE_SHADER_COMPUTE)
			continue;
		ctx->dirty_shader[i] = 0;
	}
}

static inline struct pipe_scissor_state *
fd_context_get_scissor(struct fd_context *ctx)
{
	if (ctx->rasterizer && ctx->rasterizer->scissor)
		return &ctx->scissor;
	return &ctx->disabled_scissor;
}

static inline bool
fd_supported_prim(struct fd_context *ctx, unsigned prim)
{
	return (1 << prim) & ctx->primtype_mask;
}
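
/* Draw code can use fd_supported_prim() to route primitive types the hw
 * doesn't handle natively through u_primconvert, roughly like (sketch,
 * not the exact driver code):
 *
 *    if (!fd_supported_prim(ctx, info->mode)) {
 *       util_primconvert_draw_vbo(ctx->primconvert, info);
 *       return;
 *    }
 */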

static inline struct fd_batch *
fd_context_batch(struct fd_context *ctx)
{
	if (unlikely(!ctx->batch)) {
		struct fd_batch *batch =
			fd_batch_from_fb(&ctx->screen->batch_cache, ctx, &ctx->framebuffer);
		util_copy_framebuffer_state(&batch->framebuffer, &ctx->framebuffer);
		ctx->batch = batch;
		fd_context_all_dirty(ctx);
	}
	return ctx->batch;
}

static inline void
fd_batch_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
{
	struct fd_context *ctx = batch->ctx;

	/* special case: internal blits (like mipmap level generation)
	 * go through normal draw path (via util_blitter_blit())..  but
	 * we need to ignore the FD_STAGE_DRAW which will be set, so we
	 * don't enable queries which should be paused during internal
	 * blits:
	 */
	if ((batch->stage == FD_STAGE_BLIT) &&
			(stage != FD_STAGE_NULL))
		return;

	if (ctx->query_set_stage)
		ctx->query_set_stage(batch, stage);

	batch->stage = stage;
}

void fd_context_setup_common_vbos(struct fd_context *ctx);
void fd_context_cleanup_common_vbos(struct fd_context *ctx);

struct pipe_context * fd_context_init(struct fd_context *ctx,
		struct pipe_screen *pscreen, const uint8_t *primtypes,
		void *priv, unsigned flags);

void fd_context_destroy(struct pipe_context *pctx);

#endif /* FREEDRENO_CONTEXT_H_ */