iris_binder.c revision 9f464c52
/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_binder.c
 *
 * Shader programs refer to most resources via integer handles. These are
 * indexes (BTIs) into a "Binding Table", which is simply a list of pointers
 * to SURFACE_STATE entries. Each shader stage has its own binding table,
 * set by the 3DSTATE_BINDING_TABLE_POINTERS_* commands. We stream out
 * binding tables dynamically, storing them in special BOs we call "binders."
 *
 * Unfortunately, the hardware designers made 3DSTATE_BINDING_TABLE_POINTERS
 * only accept a 16-bit pointer. This means that all binding tables have to
 * live within the 64kB range starting at Surface State Base Address. (The
 * actual SURFACE_STATE entries can live anywhere in the 4GB zone, as the
 * binding table entries are full 32-bit pointers.)
 *
 * To handle this, we split a 4GB region of VMA into two memory zones.
 * IRIS_MEMZONE_BINDER is a small region at the bottom able to hold a few
 * binder BOs. IRIS_MEMZONE_SURFACE contains the rest of the 4GB, and is
 * always at a higher address than the binders. This allows us to program
 * Surface State Base Address to the binder BO's address, and offset the
 * values in the binding table to account for the base not starting at the
 * beginning of the 4GB region.
 *
 * This does mean that we have to emit STATE_BASE_ADDRESS and stall when
 * we run out of space in the binder, which hopefully won't happen too often.
 */

#include <stdlib.h>
#include <string.h>
#include "util/u_math.h"
#include "iris_binder.h"
#include "iris_bufmgr.h"
#include "iris_context.h"

#define BTP_ALIGNMENT 32

/* Avoid using offset 0, tools consider it NULL */
#define INIT_INSERT_POINT BTP_ALIGNMENT

static bool
binder_has_space(struct iris_binder *binder, unsigned size)
{
   return binder->insert_point + size <= IRIS_BINDER_SIZE;
}

static void
binder_realloc(struct iris_context *ice)
{
   struct iris_screen *screen = (void *) ice->ctx.screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;
   struct iris_binder *binder = &ice->state.binder;

   uint64_t next_address = IRIS_MEMZONE_BINDER_START;

   if (binder->bo) {
      /* Place the new binder just after the old binder, unless we've hit
       * the end of the memory zone...then wrap around to the start again.
       */
      next_address = binder->bo->gtt_offset + IRIS_BINDER_SIZE;
      if (next_address >= IRIS_MEMZONE_SURFACE_START)
         next_address = IRIS_MEMZONE_BINDER_START;

      iris_bo_unreference(binder->bo);
   }

   binder->bo =
      iris_bo_alloc(bufmgr, "binder", IRIS_BINDER_SIZE, IRIS_MEMZONE_BINDER);
   binder->bo->gtt_offset = next_address;
   binder->map = iris_bo_map(NULL, binder->bo, MAP_WRITE);
   binder->insert_point = INIT_INSERT_POINT;

   /* Allocating a new binder requires changing Surface State Base Address,
    * which also invalidates all our previous binding tables - each entry
    * in those tables is an offset from the old base.
    *
    * We do this here so that iris_binder_reserve_3d correctly gets a new
    * larger total_size when making the updated reservation.
    */
   ice->state.dirty |= IRIS_ALL_DIRTY_BINDINGS;
}
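
/* A minimal sketch (illustration only - nothing in the driver calls this)
 * of the offset math described in the file comment above.  With Surface
 * State Base Address programmed to the binder BO's address, the 32-bit
 * pointer stored in a binding table entry is the SURFACE_STATE's distance
 * from the binder, not from the start of the 4GB zone.  The function name
 * is hypothetical.
 */
static inline uint32_t
example_surface_state_offset(uint64_t surf_address, uint64_t binder_address)
{
   /* SURFACE_STATE entries live in IRIS_MEMZONE_SURFACE, which is always
    * above the binder, so this subtraction cannot underflow...
    */
   assert(surf_address >= binder_address);

   /* ...and both addresses share one 4GB region, so the result fits. */
   assert(surf_address - binder_address <= UINT32_MAX);

   return (uint32_t) (surf_address - binder_address);
}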

static uint32_t
binder_insert(struct iris_binder *binder, unsigned size)
{
   uint32_t offset = binder->insert_point;

   binder->insert_point = align(binder->insert_point + size, BTP_ALIGNMENT);

   return offset;
}

/**
 * Reserve a block of space in the binder, given the raw size in bytes.
 */
uint32_t
iris_binder_reserve(struct iris_context *ice,
                    unsigned size)
{
   struct iris_binder *binder = &ice->state.binder;

   if (!binder_has_space(binder, size))
      binder_realloc(ice);

   assert(size > 0);
   return binder_insert(binder, size);
}

/**
 * Reserve and record binder space for 3D pipeline shader stages.
 *
 * Note that you must actually populate the new binding tables after
 * calling this function - the new area is uninitialized.
 */
void
iris_binder_reserve_3d(struct iris_context *ice)
{
   struct iris_compiled_shader **shaders = ice->shaders.prog;
   struct iris_binder *binder = &ice->state.binder;
   unsigned sizes[MESA_SHADER_STAGES] = {};
   unsigned total_size;

   /* If nothing is dirty, skip all this. */
   if (!(ice->state.dirty & IRIS_ALL_DIRTY_BINDINGS))
      return;

   /* Get the binding table sizes for each stage */
   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (!shaders[stage])
         continue;

      const struct brw_stage_prog_data *prog_data =
         (const void *) shaders[stage]->prog_data;

      /* Round up the size so our next table has an aligned starting offset */
      sizes[stage] = align(prog_data->binding_table.size_bytes, BTP_ALIGNMENT);
   }

   /* Make space for the new binding tables...this may take two tries. */
   while (true) {
      total_size = 0;
      for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
         if (ice->state.dirty & (IRIS_DIRTY_BINDINGS_VS << stage))
            total_size += sizes[stage];
      }

      assert(total_size < IRIS_BINDER_SIZE);

      if (total_size == 0)
         return;

      if (binder_has_space(binder, total_size))
         break;

      /* It didn't fit.  Allocate a new buffer and try again.  Note that
       * this will flag all bindings dirty, which may increase total_size
       * on the next iteration.
       */
      binder_realloc(ice);
   }

   /* Assign space and record the new binding table offsets. */
   uint32_t offset = binder_insert(binder, total_size);

   for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
      if (ice->state.dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) {
         binder->bt_offset[stage] = sizes[stage] > 0 ? offset : 0;
         offset += sizes[stage];
      }
   }
}
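
/* Sketch of how the recorded offsets might be consumed (illustrative only;
 * the actual consumers live in the state upload code, not this file).
 * Because Surface State Base Address points at the binder BO,
 * bt_offset[stage] serves both as the 16-bit pointer programmed via
 * 3DSTATE_BINDING_TABLE_POINTERS_* and as the offset of the stage's table
 * within the CPU mapping.  The helper name is hypothetical.
 */
static inline uint32_t *
example_stage_table(struct iris_binder *binder, gl_shader_stage stage)
{
   /* Each entry written here is a 32-bit SURFACE_STATE pointer, relative
    * to Surface State Base Address.
    */
   return (uint32_t *) ((char *) binder->map + binder->bt_offset[stage]);
}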

/**
 * Reserve and record binder space for the compute shader stage's
 * binding table.
 *
 * As with iris_binder_reserve_3d, the caller must populate the new
 * table afterwards - the reserved area is uninitialized.
 */
void
iris_binder_reserve_compute(struct iris_context *ice)
{
   if (!(ice->state.dirty & IRIS_DIRTY_BINDINGS_CS))
      return;

   struct iris_binder *binder = &ice->state.binder;
   struct brw_stage_prog_data *prog_data =
      ice->shaders.prog[MESA_SHADER_COMPUTE]->prog_data;

   unsigned size = prog_data->binding_table.size_bytes;

   if (size == 0)
      return;

   binder->bt_offset[MESA_SHADER_COMPUTE] = iris_binder_reserve(ice, size);
}

void
iris_init_binder(struct iris_context *ice)
{
   memset(&ice->state.binder, 0, sizeof(struct iris_binder));
   binder_realloc(ice);
}

void
iris_destroy_binder(struct iris_binder *binder)
{
   iris_bo_unreference(binder->bo);
}
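
/* Expected call pattern over a context's lifetime (a sketch inferred from
 * the functions above, not a verbatim copy of any caller):
 *
 *    iris_init_binder(ice);             // context creation
 *    ...
 *    iris_binder_reserve_3d(ice);       // before each draw, if bindings dirty
 *    iris_binder_reserve_compute(ice);  // before each dispatch, likewise
 *    ...
 *    iris_destroy_binder(&ice->state.binder);   // context teardown
 */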