/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_binder.c
 *
 * Shader programs refer to most resources via integer handles.  These are
 * indexes (BTIs) into a "Binding Table", which is simply a list of pointers
 * to SURFACE_STATE entries.  Each shader stage has its own binding table,
 * set by the 3DSTATE_BINDING_TABLE_POINTERS_* commands.  We stream out
 * binding tables dynamically, storing them in special BOs we call "binders."
 *
 * Unfortunately, the hardware designers made 3DSTATE_BINDING_TABLE_POINTERS
 * only accept a 16-bit pointer.  This means that all binding tables have to
 * live within the 64kB range starting at Surface State Base Address.  (The
 * actual SURFACE_STATE entries can live anywhere in the 4GB zone, as the
 * binding table entries are full 32-bit pointers.)
 *
 * To handle this, we split a 4GB region of VMA into two memory zones.
 * IRIS_MEMZONE_BINDER is a small region at the bottom able to hold a few
 * binder BOs.  IRIS_MEMZONE_SURFACE contains the rest of the 4GB, and is
 * always at a higher address than the binders.  This allows us to program
 * Surface State Base Address to the binder BO's address, and offset the
 * values in the binding table to account for the base not starting at the
 * beginning of the 4GB region.
 *
 * This does mean that we have to emit STATE_BASE_ADDRESS and stall when
 * we run out of space in the binder, which hopefully won't happen too often.
48b8e80941Smrg */ 49b8e80941Smrg 50b8e80941Smrg#include <stdlib.h> 51b8e80941Smrg#include "util/u_math.h" 52b8e80941Smrg#include "iris_binder.h" 53b8e80941Smrg#include "iris_bufmgr.h" 54b8e80941Smrg#include "iris_context.h" 55b8e80941Smrg 56b8e80941Smrg#define BTP_ALIGNMENT 32 57b8e80941Smrg 58b8e80941Smrg/* Avoid using offset 0, tools consider it NULL */ 59b8e80941Smrg#define INIT_INSERT_POINT BTP_ALIGNMENT 60b8e80941Smrg 61b8e80941Smrgstatic bool 62b8e80941Smrgbinder_has_space(struct iris_binder *binder, unsigned size) 63b8e80941Smrg{ 64b8e80941Smrg return binder->insert_point + size <= IRIS_BINDER_SIZE; 65b8e80941Smrg} 66b8e80941Smrg 67b8e80941Smrgstatic void 68b8e80941Smrgbinder_realloc(struct iris_context *ice) 69b8e80941Smrg{ 70b8e80941Smrg struct iris_screen *screen = (void *) ice->ctx.screen; 71b8e80941Smrg struct iris_bufmgr *bufmgr = screen->bufmgr; 72b8e80941Smrg struct iris_binder *binder = &ice->state.binder; 73b8e80941Smrg 74b8e80941Smrg uint64_t next_address = IRIS_MEMZONE_BINDER_START; 75b8e80941Smrg 76b8e80941Smrg if (binder->bo) { 77b8e80941Smrg /* Place the new binder just after the old binder, unless we've hit the 78b8e80941Smrg * end of the memory zone...then wrap around to the start again. 
79b8e80941Smrg */ 80b8e80941Smrg next_address = binder->bo->gtt_offset + IRIS_BINDER_SIZE; 81b8e80941Smrg if (next_address >= IRIS_MEMZONE_SURFACE_START) 82b8e80941Smrg next_address = IRIS_MEMZONE_BINDER_START; 83b8e80941Smrg 84b8e80941Smrg iris_bo_unreference(binder->bo); 85b8e80941Smrg } 86b8e80941Smrg 87b8e80941Smrg 88b8e80941Smrg binder->bo = 89b8e80941Smrg iris_bo_alloc(bufmgr, "binder", IRIS_BINDER_SIZE, IRIS_MEMZONE_BINDER); 90b8e80941Smrg binder->bo->gtt_offset = next_address; 91b8e80941Smrg binder->map = iris_bo_map(NULL, binder->bo, MAP_WRITE); 92b8e80941Smrg binder->insert_point = INIT_INSERT_POINT; 93b8e80941Smrg 94b8e80941Smrg /* Allocating a new binder requires changing Surface State Base Address, 95b8e80941Smrg * which also invalidates all our previous binding tables - each entry 96b8e80941Smrg * in those tables is an offset from the old base. 97b8e80941Smrg * 98b8e80941Smrg * We do this here so that iris_binder_reserve_3d correctly gets a new 99b8e80941Smrg * larger total_size when making the updated reservation. 100b8e80941Smrg */ 101b8e80941Smrg ice->state.dirty |= IRIS_ALL_DIRTY_BINDINGS; 102b8e80941Smrg} 103b8e80941Smrg 104b8e80941Smrgstatic uint32_t 105b8e80941Smrgbinder_insert(struct iris_binder *binder, unsigned size) 106b8e80941Smrg{ 107b8e80941Smrg uint32_t offset = binder->insert_point; 108b8e80941Smrg 109b8e80941Smrg binder->insert_point = align(binder->insert_point + size, BTP_ALIGNMENT); 110b8e80941Smrg 111b8e80941Smrg return offset; 112b8e80941Smrg} 113b8e80941Smrg 114b8e80941Smrg/** 115b8e80941Smrg * Reserve a block of space in the binder, given the raw size in bytes. 
116b8e80941Smrg */ 117b8e80941Smrguint32_t 118b8e80941Smrgiris_binder_reserve(struct iris_context *ice, 119b8e80941Smrg unsigned size) 120b8e80941Smrg{ 121b8e80941Smrg struct iris_binder *binder = &ice->state.binder; 122b8e80941Smrg 123b8e80941Smrg if (!binder_has_space(binder, size)) 124b8e80941Smrg binder_realloc(ice); 125b8e80941Smrg 126b8e80941Smrg assert(size > 0); 127b8e80941Smrg return binder_insert(binder, size); 128b8e80941Smrg} 129b8e80941Smrg 130b8e80941Smrg/** 131b8e80941Smrg * Reserve and record binder space for 3D pipeline shader stages. 132b8e80941Smrg * 133b8e80941Smrg * Note that you must actually populate the new binding tables after 134b8e80941Smrg * calling this command - the new area is uninitialized. 135b8e80941Smrg */ 136b8e80941Smrgvoid 137b8e80941Smrgiris_binder_reserve_3d(struct iris_context *ice) 138b8e80941Smrg{ 139b8e80941Smrg struct iris_compiled_shader **shaders = ice->shaders.prog; 140b8e80941Smrg struct iris_binder *binder = &ice->state.binder; 141b8e80941Smrg unsigned sizes[MESA_SHADER_STAGES] = {}; 142b8e80941Smrg unsigned total_size; 143b8e80941Smrg 144b8e80941Smrg /* If nothing is dirty, skip all this. */ 145b8e80941Smrg if (!(ice->state.dirty & IRIS_ALL_DIRTY_BINDINGS)) 146b8e80941Smrg return; 147b8e80941Smrg 148b8e80941Smrg /* Get the binding table sizes for each stage */ 149b8e80941Smrg for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) { 150b8e80941Smrg if (!shaders[stage]) 151b8e80941Smrg continue; 152b8e80941Smrg 153b8e80941Smrg const struct brw_stage_prog_data *prog_data = 154b8e80941Smrg (const void *) shaders[stage]->prog_data; 155b8e80941Smrg 156b8e80941Smrg /* Round up the size so our next table has an aligned starting offset */ 157b8e80941Smrg sizes[stage] = align(prog_data->binding_table.size_bytes, BTP_ALIGNMENT); 158b8e80941Smrg } 159b8e80941Smrg 160b8e80941Smrg /* Make space for the new binding tables...this may take two tries. 
*/ 161b8e80941Smrg while (true) { 162b8e80941Smrg total_size = 0; 163b8e80941Smrg for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) { 164b8e80941Smrg if (ice->state.dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) 165b8e80941Smrg total_size += sizes[stage]; 166b8e80941Smrg } 167b8e80941Smrg 168b8e80941Smrg assert(total_size < IRIS_BINDER_SIZE); 169b8e80941Smrg 170b8e80941Smrg if (total_size == 0) 171b8e80941Smrg return; 172b8e80941Smrg 173b8e80941Smrg if (binder_has_space(binder, total_size)) 174b8e80941Smrg break; 175b8e80941Smrg 176b8e80941Smrg /* It didn't fit. Allocate a new buffer and try again. Note that 177b8e80941Smrg * this will flag all bindings dirty, which may increase total_size 178b8e80941Smrg * on the next iteration. 179b8e80941Smrg */ 180b8e80941Smrg binder_realloc(ice); 181b8e80941Smrg } 182b8e80941Smrg 183b8e80941Smrg /* Assign space and record the new binding table offsets. */ 184b8e80941Smrg uint32_t offset = binder_insert(binder, total_size); 185b8e80941Smrg 186b8e80941Smrg for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) { 187b8e80941Smrg if (ice->state.dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) { 188b8e80941Smrg binder->bt_offset[stage] = sizes[stage] > 0 ? 
offset : 0; 189b8e80941Smrg offset += sizes[stage]; 190b8e80941Smrg } 191b8e80941Smrg } 192b8e80941Smrg} 193b8e80941Smrg 194b8e80941Smrgvoid 195b8e80941Smrgiris_binder_reserve_compute(struct iris_context *ice) 196b8e80941Smrg{ 197b8e80941Smrg if (!(ice->state.dirty & IRIS_DIRTY_BINDINGS_CS)) 198b8e80941Smrg return; 199b8e80941Smrg 200b8e80941Smrg struct iris_binder *binder = &ice->state.binder; 201b8e80941Smrg struct brw_stage_prog_data *prog_data = 202b8e80941Smrg ice->shaders.prog[MESA_SHADER_COMPUTE]->prog_data; 203b8e80941Smrg 204b8e80941Smrg unsigned size = prog_data->binding_table.size_bytes; 205b8e80941Smrg 206b8e80941Smrg if (size == 0) 207b8e80941Smrg return; 208b8e80941Smrg 209b8e80941Smrg binder->bt_offset[MESA_SHADER_COMPUTE] = iris_binder_reserve(ice, size); 210b8e80941Smrg} 211b8e80941Smrg 212b8e80941Smrgvoid 213b8e80941Smrgiris_init_binder(struct iris_context *ice) 214b8e80941Smrg{ 215b8e80941Smrg memset(&ice->state.binder, 0, sizeof(struct iris_binder)); 216b8e80941Smrg binder_realloc(ice); 217b8e80941Smrg} 218b8e80941Smrg 219b8e80941Smrgvoid 220b8e80941Smrgiris_destroy_binder(struct iris_binder *binder) 221b8e80941Smrg{ 222b8e80941Smrg iris_bo_unreference(binder->bo); 223b8e80941Smrg} 224