/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "sna.h"
#include "sna_render.h"
#include "sna_render_inline.h"
#include "gen8_vertex.h"

/*
 * Round sna->render.vertex_used up to the next whole-vertex boundary for
 * this operation's vertex layout, flushing (or submitting) the vertex
 * buffer first if fewer than two rects' worth of floats would remain.
 * On return, vertex_index/vertex_used are aligned to op->floats_per_vertex.
 */
void gen8_vertex_align(struct sna *sna, const struct sna_composite_op *op)
{
	int vertex_index;

	/* Each rect is emitted as exactly three vertices. */
	assert(op->floats_per_rect == 3*op->floats_per_vertex);

	/* First whole-vertex index at or after the current used mark. */
	vertex_index = (sna->render.vertex_used + op->floats_per_vertex - 1) / op->floats_per_vertex;
	if ((int)sna->render.vertex_size - vertex_index * op->floats_per_vertex < 2*op->floats_per_rect) {
		DBG(("%s: flushing vertex buffer: new index=%d, max=%d\n",
		     __FUNCTION__, vertex_index, sna->render.vertex_size / op->floats_per_vertex));
		/* If even after finishing the vbo there is not enough room
		 * for two rects, submit the batch and start afresh.
		 */
		if (gen8_vertex_finish(sna) < 2*op->floats_per_rect) {
			kgem_submit(&sna->kgem);
			_kgem_set_mode(&sna->kgem, KGEM_RENDER);
		}

		/* Re-align against the (possibly reset) vertex_used. */
		vertex_index = (sna->render.vertex_used + op->floats_per_vertex - 1) / op->floats_per_vertex;
		assert(vertex_index * op->floats_per_vertex <= sna->render.vertex_size);
	}

	sna->render.vertex_index = vertex_index;
	sna->render.vertex_used = vertex_index * op->floats_per_vertex;
}

/*
 * Patch the batch dword previously reserved at render.vertex_offset with
 * the number of vertices emitted since vertex_start, then clear the
 * pending offset.  NOTE(review): vertex_offset presumably points at the
 * vertex-count field of an emitted 3DPRIMITIVE-style command — confirm
 * against the gen8 render emitter that reserves it.
 */
void gen8_vertex_flush(struct sna *sna)
{
	DBG(("%s[%x] = %d\n", __FUNCTION__,
	     4*sna->render.vertex_offset,
	     sna->render.vertex_index - sna->render.vertex_start));

	assert(sna->render.vertex_offset);
	assert(sna->render.vertex_offset <= sna->kgem.nbatch);
	assert(sna->render.vertex_index > sna->render.vertex_start);
	assert(sna->render.vertex_used <= sna->render.vertex_size);

	sna->kgem.batch[sna->render.vertex_offset] =
		sna->render.vertex_index - sna->render.vertex_start;
	sna->render.vertex_offset = 0;
}

/*
 * Retire the current vertex buffer and install a fresh one.
 *
 * If a vbo is active, its pending batch relocations are resolved via
 * kgem_add_reloc64() and the bo is released; a replacement linear bo is
 * then allocated (256 KiB, halving down to 16 KiB on failure, with a
 * final plain GTT-mapped attempt), mapped, and any floats accumulated in
 * the embedded vertex_data array are copied across.
 *
 * Returns the number of floats still free in the new buffer, or 0 when
 * the code fell back to the fixed-size embedded vertex_data array (also
 * returned when there was no vbo and the GPU is idle, since flushing is
 * then unnecessary).
 */
int gen8_vertex_finish(struct sna *sna)
{
	struct kgem_bo *bo;
	unsigned int i;
	unsigned hint, size;

	DBG(("%s: used=%d / %d\n", __FUNCTION__,
	     sna->render.vertex_used, sna->render.vertex_size));
	assert(sna->render.vertex_offset == 0);
	assert(sna->render.vertex_used);
	assert(sna->render.vertex_used <= sna->render.vertex_size);

	sna_vertex_wait__locked(&sna->render);

	/* Note: we only need dword alignment (currently) */

	hint = CREATE_GTT_MAP;

	bo = sna->render.vbo;
	if (bo) {
		/* Resolve every batch location that referenced the old vbo. */
		for (i = 0; i < sna->render.nvertex_reloc; i++) {
			DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
			     i, sna->render.vertex_reloc[i]));

			*(uint64_t *)(sna->kgem.batch+sna->render.vertex_reloc[i]) =
				kgem_add_reloc64(&sna->kgem,
						 sna->render.vertex_reloc[i], bo,
						 I915_GEM_DOMAIN_VERTEX << 16,
						 0);
		}

		assert(!sna->render.active);
		sna->render.nvertex_reloc = 0;
		sna->render.vertex_used = 0;
		sna->render.vertex_index = 0;
		sna->render.vbo = NULL;
		sna->render.vb_id = 0;

		kgem_bo_destroy(&sna->kgem, bo);
		/* We just recycled a vbo, so prefer a cached, unthrottled
		 * replacement for the next one.
		 */
		hint |= CREATE_CACHED | CREATE_NO_THROTTLE;
	} else {
		if (kgem_is_idle(&sna->kgem)) {
			/* Nothing in flight: the embedded array suffices. */
			sna->render.vertices = sna->render.vertex_data;
			sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
			return 0;
		}
	}

	/* Allocate a new vbo, shrinking on failure before giving up. */
	size = 256*1024;
	assert(!sna->render.active);
	sna->render.vertices = NULL;
	sna->render.vbo = kgem_create_linear(&sna->kgem, size, hint);
	while (sna->render.vbo == NULL && size > 16*1024) {
		size /= 2;
		sna->render.vbo = kgem_create_linear(&sna->kgem, size, hint);
	}
	if (sna->render.vbo == NULL)
		sna->render.vbo = kgem_create_linear(&sna->kgem,
						     256*1024, CREATE_GTT_MAP);
	if (sna->render.vbo)
		sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
	if (sna->render.vertices == NULL) {
		/* Allocation or mapping failed: fall back to vertex_data. */
		if (sna->render.vbo) {
			kgem_bo_destroy(&sna->kgem, sna->render.vbo);
			sna->render.vbo = NULL;
		}
		sna->render.vertices = sna->render.vertex_data;
		sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
		return 0;
	}

	/* Carry over any vertices staged in the embedded array. */
	if (sna->render.vertex_used) {
		DBG(("%s: copying initial buffer x %d to handle=%d\n",
		     __FUNCTION__,
		     sna->render.vertex_used,
		     sna->render.vbo->handle));
		assert(sizeof(float)*sna->render.vertex_used <=
		       __kgem_bo_size(sna->render.vbo));
		memcpy(sna->render.vertices,
		       sna->render.vertex_data,
		       sizeof(float)*sna->render.vertex_used);
	}

	/* Capacity in floats, clamped below UINT16_MAX — presumably a
	 * hardware/packet limit on the vertex count; confirm against the
	 * gen8 command encoding.
	 */
	size = __kgem_bo_size(sna->render.vbo)/4;
	if (size >= UINT16_MAX)
		size = UINT16_MAX - 1;

	DBG(("%s: create vbo handle=%d, size=%d\n",
	     __FUNCTION__, sna->render.vbo->handle, size));

	sna->render.vertex_size = size;
	return sna->render.vertex_size - sna->render.vertex_used;
}

/*
 * Close out vertex state before the batch is submitted.
 *
 * Decides where the emitted vertices live for this batch: keep the
 * current vbo (converting a CPU map to GTT on non-LLC hardware), discard
 * it if nearly full, copy small vertex runs directly into spare batch
 * space, or upload the embedded array into a new (possibly temporary)
 * bo.  Finally patches all recorded relocations to point at the chosen
 * backing store and resets the per-batch reloc/vb state.
 */
void gen8_vertex_close(struct sna *sna)
{
	struct kgem_bo *bo, *free_bo = NULL;
	unsigned int i, delta = 0;

	assert(sna->render.vertex_offset == 0);
	if (!sna->render.vb_id)
		return;		/* no vertex buffer bound in this batch */

	DBG(("%s: used=%d, vbo active? %d, vb=%x, nreloc=%d\n",
	     __FUNCTION__, sna->render.vertex_used, sna->render.vbo ? sna->render.vbo->handle : 0,
	     sna->render.vb_id, sna->render.nvertex_reloc));

	assert(!sna->render.active);

	bo = sna->render.vbo;
	if (bo) {
		if (sna->render.vertex_size - sna->render.vertex_used < 64) {
			/* Too little room left to be worth keeping around. */
			DBG(("%s: discarding vbo (full), handle=%d\n", __FUNCTION__, sna->render.vbo->handle));
			sna->render.vbo = NULL;
			sna->render.vertices = sna->render.vertex_data;
			sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
			free_bo = bo;
		} else if (!sna->kgem.has_llc && sna->render.vertices == MAP(bo->map__cpu)) {
			/* Without LLC a CPU mapping is not coherent with the
			 * GPU, so switch the retained vbo to a GTT mapping.
			 */
			DBG(("%s: converting CPU map to GTT\n", __FUNCTION__));
			sna->render.vertices =
				kgem_bo_map__gtt(&sna->kgem, sna->render.vbo);
			if (sna->render.vertices == NULL) {
				sna->render.vbo = NULL;
				sna->render.vertices = sna->render.vertex_data;
				sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
				free_bo = bo;
			}

		}
	} else {
		int size;

		/* Estimate total batch usage if we were to inline the
		 * vertices between nbatch and the surface state heap.
		 */
		size = sna->kgem.nbatch;
		size += sna->kgem.batch_size - sna->kgem.surface;
		size += sna->render.vertex_used;

		if (size <= 1024) {
			/* Small enough: copy vertices into the batch itself
			 * and point the relocation at the batch bo (bo=NULL)
			 * with a byte offset of nbatch*4.
			 */
			DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
			     sna->render.vertex_used, sna->kgem.nbatch));
			assert(sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface);
			memcpy(sna->kgem.batch + sna->kgem.nbatch,
			       sna->render.vertex_data,
			       sna->render.vertex_used * 4);
			delta = sna->kgem.nbatch * 4;
			bo = NULL;
			sna->kgem.nbatch += sna->render.vertex_used;
		} else {
			/* Upload to a fresh bo, halving the request until it
			 * still fits the staged vertices.
			 */
			size = 256 * 1024;
			do {
				bo = kgem_create_linear(&sna->kgem, size,
							CREATE_GTT_MAP | CREATE_NO_RETIRE | CREATE_NO_THROTTLE | CREATE_CACHED);
			} while (bo == NULL && (size>>=1) > sizeof(float)*sna->render.vertex_used);

			sna->render.vertices = NULL;
			if (bo)
				sna->render.vertices = kgem_bo_map(&sna->kgem, bo);
			if (sna->render.vertices != NULL) {
				/* Mapped successfully: keep this bo as the
				 * ongoing vbo for subsequent batches.
				 */
				DBG(("%s: new vbo: %d / %d\n", __FUNCTION__,
				     sna->render.vertex_used, __kgem_bo_size(bo)/4));

				assert(sizeof(float)*sna->render.vertex_used <= __kgem_bo_size(bo));
				memcpy(sna->render.vertices,
				       sna->render.vertex_data,
				       sizeof(float)*sna->render.vertex_used);

				size = __kgem_bo_size(bo)/4;
				if (size >= UINT16_MAX)
					size = UINT16_MAX - 1;

				sna->render.vbo = bo;
				sna->render.vertex_size = size;
			} else {
				/* Mapping failed: write the vertices into a
				 * throwaway bo used only by this batch.
				 */
				DBG(("%s: tmp vbo: %d\n", __FUNCTION__,
				     sna->render.vertex_used));

				if (bo)
					kgem_bo_destroy(&sna->kgem, bo);

				bo = kgem_create_linear(&sna->kgem,
							4*sna->render.vertex_used,
							CREATE_NO_THROTTLE);
				if (bo && !kgem_bo_write(&sna->kgem, bo,
							 sna->render.vertex_data,
							 4*sna->render.vertex_used)) {
					kgem_bo_destroy(&sna->kgem, bo);
					bo = NULL;
				}

				assert(sna->render.vbo == NULL);
				sna->render.vertices = sna->render.vertex_data;
				sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
				free_bo = bo;
			}
		}
	}

	/* Patch every recorded relocation to the chosen backing bo
	 * (bo == NULL means the batch itself, offset by delta).
	 */
	assert(sna->render.nvertex_reloc);
	for (i = 0; i < sna->render.nvertex_reloc; i++) {
		DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
		     i, sna->render.vertex_reloc[i]));

		*(uint64_t *)(sna->kgem.batch+sna->render.vertex_reloc[i]) =
			kgem_add_reloc64(&sna->kgem,
					 sna->render.vertex_reloc[i], bo,
					 I915_GEM_DOMAIN_VERTEX << 16,
					 delta);
	}
	sna->render.nvertex_reloc = 0;
	sna->render.vb_id = 0;

	if (sna->render.vbo == NULL) {
		/* No retained vbo: the next batch starts over from the
		 * embedded vertex_data array.
		 */
		assert(!sna->render.active);
		sna->render.vertex_used = 0;
		sna->render.vertex_index = 0;
		assert(sna->render.vertices == sna->render.vertex_data);
		assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
	}

	if (free_bo)
		kgem_bo_destroy(&sna->kgem, free_bo);
}