si_buffer.c revision 7ec681f3
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "radeonsi/si_pipe.h"
#include "util/u_memory.h"
#include "util/u_transfer.h"
#include "util/u_upload_mgr.h"

#include <inttypes.h>
#include <stdio.h>

bool si_cs_is_buffer_referenced(struct si_context *sctx, struct pb_buffer *buf,
                                enum radeon_bo_usage usage)
{
   return sctx->ws->cs_is_buffer_referenced(&sctx->gfx_cs, buf, usage);
}

void *si_buffer_map(struct si_context *sctx, struct si_resource *resource,
                    unsigned usage)
{
   return sctx->ws->buffer_map(sctx->ws, resource->buf, &sctx->gfx_cs, usage);
}

void si_init_resource_fields(struct si_screen *sscreen, struct si_resource *res, uint64_t size,
                             unsigned alignment)
{
   struct si_texture *tex = (struct si_texture *)res;

   res->bo_size = size;
   res->bo_alignment_log2 = util_logbase2(alignment);
   res->flags = 0;
   res->texture_handle_allocated = false;
   res->image_handle_allocated = false;

   switch (res->b.b.usage) {
   case PIPE_USAGE_STREAM:
      res->flags |= RADEON_FLAG_GTT_WC;
      if (sscreen->info.smart_access_memory)
         res->domains = RADEON_DOMAIN_VRAM;
      else
         res->domains = RADEON_DOMAIN_GTT;
      break;
   case PIPE_USAGE_STAGING:
      /* Transfers are likely to occur more often with these
       * resources. */
      res->domains = RADEON_DOMAIN_GTT;
      break;
   case PIPE_USAGE_DYNAMIC:
      /* Older kernels didn't always flush the HDP cache before
       * CS execution.
       */
      if (!sscreen->info.kernel_flushes_hdp_before_ib) {
         res->domains = RADEON_DOMAIN_GTT;
         res->flags |= RADEON_FLAG_GTT_WC;
         break;
      }
      FALLTHROUGH;
   case PIPE_USAGE_DEFAULT:
   case PIPE_USAGE_IMMUTABLE:
   default:
      /* Not listing GTT here improves performance in some
       * apps. */
      res->domains = RADEON_DOMAIN_VRAM;
      res->flags |= RADEON_FLAG_GTT_WC;
      break;
   }

   if (res->b.b.target == PIPE_BUFFER && res->b.b.flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) {
      /* Use GTT for all persistent mappings with older
       * kernels, because they didn't always flush the HDP
       * cache before CS execution.
       *
       * Write-combined CPU mappings are fine, the kernel
       * ensures all CPU writes finish before the GPU
       * executes a command stream.
       *
       * radeon doesn't have good BO move throttling, so put all
       * persistent buffers into GTT to prevent VRAM CPU page faults.
       */
      if (!sscreen->info.kernel_flushes_hdp_before_ib || !sscreen->info.is_amdgpu)
         res->domains = RADEON_DOMAIN_GTT;
   }

   /* Tiled textures are unmappable. Always put them in VRAM. */
   if ((res->b.b.target != PIPE_BUFFER && !tex->surface.is_linear) ||
       res->b.b.flags & SI_RESOURCE_FLAG_UNMAPPABLE) {
      res->domains = RADEON_DOMAIN_VRAM;
      res->flags |= RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_GTT_WC;
   }

   /* Displayable and shareable surfaces are not suballocated. */
   if (res->b.b.bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT))
      res->flags |= RADEON_FLAG_NO_SUBALLOC; /* shareable */
   else
      res->flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;

   if (res->b.b.bind & PIPE_BIND_PROTECTED ||
       /* Force scanout/depth/stencil buffer allocation to be encrypted */
       (sscreen->debug_flags & DBG(TMZ) &&
        res->b.b.bind & (PIPE_BIND_SCANOUT | PIPE_BIND_DEPTH_STENCIL)))
      res->flags |= RADEON_FLAG_ENCRYPTED;

   if (res->b.b.flags & PIPE_RESOURCE_FLAG_ENCRYPTED)
      res->flags |= RADEON_FLAG_ENCRYPTED;

   if (sscreen->debug_flags & DBG(NO_WC))
      res->flags &= ~RADEON_FLAG_GTT_WC;

   if (res->b.b.flags & SI_RESOURCE_FLAG_READ_ONLY)
      res->flags |= RADEON_FLAG_READ_ONLY;

   if (res->b.b.flags & SI_RESOURCE_FLAG_32BIT)
      res->flags |= RADEON_FLAG_32BIT;

   if (res->b.b.flags & SI_RESOURCE_FLAG_DRIVER_INTERNAL)
      res->flags |= RADEON_FLAG_DRIVER_INTERNAL;

   /* For higher throughput and lower latency over PCIe assuming sequential access.
    * Only CP DMA and optimized compute benefit from this.
    * GFX8 and older don't support RADEON_FLAG_UNCACHED.
    */
   if (sscreen->info.chip_class >= GFX9 &&
       res->b.b.flags & SI_RESOURCE_FLAG_UNCACHED)
      res->flags |= RADEON_FLAG_UNCACHED;

   /* Set expected VRAM and GART usage for the buffer. */
   res->memory_usage_kb = MAX2(1, size / 1024);

   if (res->domains & RADEON_DOMAIN_VRAM) {
      /* We don't want to evict buffers from VRAM by mapping them for CPU access,
       * because they might never be moved back again. If a buffer is large enough,
       * upload data by copying from a temporary GTT buffer. 8K might not seem much,
       * but there can be 100000 buffers.
       *
       * This tweak improves performance for viewperf creo & snx.
       */
      if (!sscreen->info.smart_access_memory &&
          sscreen->info.has_dedicated_vram &&
          size >= 8196)
         res->b.b.flags |= PIPE_RESOURCE_FLAG_DONT_MAP_DIRECTLY;
   }
}
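
/* Illustrative sketch (not compiled): how the placement heuristics above
 * play out for a buffer template on a discrete GPU without Smart Access
 * Memory. Only the pipe_resource usage/flags fields matter here:
 *
 *    struct pipe_resource templ = {0};
 *    templ.target = PIPE_BUFFER;
 *    templ.width0 = 4096;
 *
 *    templ.usage = PIPE_USAGE_STREAM;   // -> RADEON_DOMAIN_GTT + GTT_WC
 *    templ.usage = PIPE_USAGE_STAGING;  // -> RADEON_DOMAIN_GTT, CPU-cached
 *    templ.usage = PIPE_USAGE_DEFAULT;  // -> RADEON_DOMAIN_VRAM + GTT_WC
 */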

bool si_alloc_resource(struct si_screen *sscreen, struct si_resource *res)
{
   struct pb_buffer *old_buf, *new_buf;

   /* Allocate a new resource. */
   new_buf = sscreen->ws->buffer_create(sscreen->ws, res->bo_size, 1 << res->bo_alignment_log2,
                                        res->domains, res->flags);
   if (!new_buf) {
      return false;
   }

   /* Replace the pointer such that if res->buf wasn't NULL, it won't be
    * NULL. This should prevent crashes with multiple contexts using
    * the same buffer where one of the contexts invalidates it while
    * the others are using it.
    */
   old_buf = res->buf;
   res->buf = new_buf; /* should be atomic */
   res->gpu_address = sscreen->ws->buffer_get_virtual_address(res->buf);

   if (res->flags & RADEON_FLAG_32BIT) {
      uint64_t start = res->gpu_address;
      uint64_t last = start + res->bo_size - 1;
      (void)start;
      (void)last;

      assert((start >> 32) == sscreen->info.address32_hi);
      assert((last >> 32) == sscreen->info.address32_hi);
   }

   radeon_bo_reference(sscreen->ws, &old_buf, NULL);

   util_range_set_empty(&res->valid_buffer_range);
   res->TC_L2_dirty = false;

   /* Print debug information. */
   if (sscreen->debug_flags & DBG(VM) && res->b.b.target == PIPE_BUFFER) {
      fprintf(stderr, "VM start=0x%" PRIX64 " end=0x%" PRIX64 " | Buffer %" PRIu64 " bytes\n",
              res->gpu_address, res->gpu_address + res->buf->size, res->buf->size);
   }

   if (res->b.b.flags & SI_RESOURCE_FLAG_CLEAR)
      si_screen_clear_buffer(sscreen, &res->b.b, 0, res->bo_size, 0, SI_OP_SYNC_AFTER);

   return true;
}

static void si_resource_destroy(struct pipe_screen *screen, struct pipe_resource *buf)
{
   if (buf->target == PIPE_BUFFER) {
      struct si_screen *sscreen = (struct si_screen *)screen;
      struct si_resource *buffer = si_resource(buf);

      threaded_resource_deinit(buf);
      util_range_destroy(&buffer->valid_buffer_range);
      radeon_bo_reference(((struct si_screen *)screen)->ws, &buffer->buf, NULL);
      util_idalloc_mt_free(&sscreen->buffer_ids, buffer->b.buffer_id_unique);
      FREE_CL(buffer);
   } else if (buf->flags & SI_RESOURCE_AUX_PLANE) {
      struct si_auxiliary_texture *tex = (struct si_auxiliary_texture *)buf;

      radeon_bo_reference(((struct si_screen *)screen)->ws, &tex->buffer, NULL);
      FREE_CL(tex);
   } else {
      struct si_texture *tex = (struct si_texture *)buf;
      struct si_resource *resource = &tex->buffer;

      si_texture_reference(&tex->flushed_depth_texture, NULL);

      if (tex->cmask_buffer != &tex->buffer) {
         si_resource_reference(&tex->cmask_buffer, NULL);
      }
      radeon_bo_reference(((struct si_screen *)screen)->ws, &resource->buf, NULL);
      FREE_CL(tex);
   }
}

/* Reallocate the buffer and update all resource bindings where the buffer is
 * bound.
 *
 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
 * idle by discarding its contents.
 */
static bool si_invalidate_buffer(struct si_context *sctx, struct si_resource *buf)
{
   /* Shared buffers can't be reallocated. */
   if (buf->b.is_shared)
      return false;

   /* Sparse buffers can't be reallocated. */
   if (buf->flags & RADEON_FLAG_SPARSE)
      return false;

   /* In AMD_pinned_memory, the user pointer association only gets
    * broken when the buffer is explicitly re-allocated.
    */
   if (buf->b.is_user_ptr)
      return false;

   /* Check if mapping this buffer would cause waiting for the GPU. */
   if (si_cs_is_buffer_referenced(sctx, buf->buf, RADEON_USAGE_READWRITE) ||
       !sctx->ws->buffer_wait(sctx->ws, buf->buf, 0, RADEON_USAGE_READWRITE)) {
      /* Reallocate the buffer in the same pipe_resource. */
      si_alloc_resource(sctx->screen, buf);
      si_rebind_buffer(sctx, &buf->b.b);
   } else {
      util_range_set_empty(&buf->valid_buffer_range);
   }

   return true;
}

/* Replace the storage of dst with src. */
void si_replace_buffer_storage(struct pipe_context *ctx, struct pipe_resource *dst,
                               struct pipe_resource *src, unsigned num_rebinds,
                               uint32_t rebind_mask, uint32_t delete_buffer_id)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_resource *sdst = si_resource(dst);
   struct si_resource *ssrc = si_resource(src);

   radeon_bo_reference(sctx->screen->ws, &sdst->buf, ssrc->buf);
   sdst->gpu_address = ssrc->gpu_address;
   sdst->b.b.bind = ssrc->b.b.bind;
   sdst->flags = ssrc->flags;

   assert(sdst->memory_usage_kb == ssrc->memory_usage_kb);
   assert(sdst->bo_size == ssrc->bo_size);
   assert(sdst->bo_alignment_log2 == ssrc->bo_alignment_log2);
   assert(sdst->domains == ssrc->domains);

   si_rebind_buffer(sctx, dst);

   util_idalloc_mt_free(&sctx->screen->buffer_ids, delete_buffer_id);
}

static void si_invalidate_resource(struct pipe_context *ctx, struct pipe_resource *resource)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_resource *buf = si_resource(resource);

   /* We currently only do anything here for buffers. */
   if (resource->target == PIPE_BUFFER)
      (void)si_invalidate_buffer(sctx, buf);
}
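
/* Usage sketch (illustrative, not compiled): state trackers typically reach
 * si_invalidate_buffer() through the invalidate_resource hook to orphan a
 * busy buffer instead of stalling, e.g. for glBufferData(..., NULL) or
 * glInvalidateBufferData(). "ctx" and "buf" are assumed to exist:
 *
 *    ctx->invalidate_resource(ctx, buf);
 *    // On success, the pipe_resource now points at fresh, idle storage;
 *    // the previous contents are undefined.
 */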

static void *si_buffer_get_transfer(struct pipe_context *ctx, struct pipe_resource *resource,
                                    unsigned usage, const struct pipe_box *box,
                                    struct pipe_transfer **ptransfer, void *data,
                                    struct si_resource *staging, unsigned offset)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_transfer *transfer;

   if (usage & PIPE_MAP_THREAD_SAFE)
      transfer = malloc(sizeof(*transfer));
   else if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
      transfer = slab_alloc(&sctx->pool_transfers_unsync);
   else
      transfer = slab_alloc(&sctx->pool_transfers);

   transfer->b.b.resource = NULL;
   pipe_resource_reference(&transfer->b.b.resource, resource);
   transfer->b.b.level = 0;
   transfer->b.b.usage = usage;
   transfer->b.b.box = *box;
   transfer->b.b.stride = 0;
   transfer->b.b.layer_stride = 0;
   transfer->b.b.offset = offset;
   transfer->b.staging = NULL;
   transfer->staging = staging;
   *ptransfer = &transfer->b.b;
   return data;
}

static void *si_buffer_transfer_map(struct pipe_context *ctx, struct pipe_resource *resource,
                                    unsigned level, unsigned usage, const struct pipe_box *box,
                                    struct pipe_transfer **ptransfer)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_resource *buf = si_resource(resource);
   uint8_t *data;

   assert(box->x + box->width <= resource->width0);

   /* From GL_AMD_pinned_memory issues:
    *
    *    4) Is glMapBuffer on a shared buffer guaranteed to return the
    *       same system address which was specified at creation time?
    *
    *       RESOLVED: NO. The GL implementation might return a different
    *       virtual mapping of that memory, although the same physical
    *       page will be used.
    *
    * So don't ever use staging buffers.
    */
   if (buf->b.is_user_ptr)
      usage |= PIPE_MAP_PERSISTENT;
   if (usage & PIPE_MAP_ONCE)
      usage |= RADEON_MAP_TEMPORARY;

   /* See if the buffer range being mapped has never been initialized,
    * in which case it can be mapped unsynchronized.
    */
   if (!(usage & (PIPE_MAP_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
       usage & PIPE_MAP_WRITE && !buf->b.is_shared &&
       !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width)) {
      usage |= PIPE_MAP_UNSYNCHRONIZED;
   }

   /* If discarding the entire range, discard the whole resource instead. */
   if (usage & PIPE_MAP_DISCARD_RANGE && box->x == 0 && box->width == resource->width0) {
      usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
   }

   /* If a buffer in VRAM is too large and the range is discarded, don't
    * map it directly. This makes sure that the buffer stays in VRAM.
    */
   bool force_discard_range = false;
   if (usage & (PIPE_MAP_DISCARD_WHOLE_RESOURCE | PIPE_MAP_DISCARD_RANGE) &&
       !(usage & PIPE_MAP_PERSISTENT) &&
       buf->b.b.flags & PIPE_RESOURCE_FLAG_DONT_MAP_DIRECTLY) {
      usage &= ~(PIPE_MAP_DISCARD_WHOLE_RESOURCE | PIPE_MAP_UNSYNCHRONIZED);
      usage |= PIPE_MAP_DISCARD_RANGE;
      force_discard_range = true;
   }

   if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE &&
       !(usage & (PIPE_MAP_UNSYNCHRONIZED | TC_TRANSFER_MAP_NO_INVALIDATE))) {
      assert(usage & PIPE_MAP_WRITE);

      if (si_invalidate_buffer(sctx, buf)) {
         /* At this point, the buffer is always idle. */
         usage |= PIPE_MAP_UNSYNCHRONIZED;
      } else {
         /* Fall back to a temporary buffer. */
         usage |= PIPE_MAP_DISCARD_RANGE;
      }
   }

   if (usage & PIPE_MAP_DISCARD_RANGE &&
       ((!(usage & (PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_PERSISTENT))) ||
        (buf->flags & RADEON_FLAG_SPARSE))) {
      assert(usage & PIPE_MAP_WRITE);

      /* Check if mapping this buffer would cause waiting for the GPU.
       */
      if (buf->flags & RADEON_FLAG_SPARSE || force_discard_range ||
          si_cs_is_buffer_referenced(sctx, buf->buf, RADEON_USAGE_READWRITE) ||
          !sctx->ws->buffer_wait(sctx->ws, buf->buf, 0, RADEON_USAGE_READWRITE)) {
         /* Do a wait-free write-only transfer using a temporary buffer. */
         struct u_upload_mgr *uploader;
         struct si_resource *staging = NULL;
         unsigned offset;

         /* If we are not called from the driver thread, we have
          * to use the uploader from u_threaded_context, which is
          * local to the calling thread.
          */
         if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
            uploader = sctx->tc->base.stream_uploader;
         else
            uploader = sctx->b.stream_uploader;

         u_upload_alloc(uploader, 0, box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT),
                        sctx->screen->info.tcc_cache_line_size, &offset,
                        (struct pipe_resource **)&staging, (void **)&data);

         if (staging) {
            data += box->x % SI_MAP_BUFFER_ALIGNMENT;
            return si_buffer_get_transfer(ctx, resource, usage, box, ptransfer, data, staging,
                                          offset);
         } else if (buf->flags & RADEON_FLAG_SPARSE) {
            return NULL;
         }
      } else {
         /* At this point, the buffer is always idle (we checked it above). */
         usage |= PIPE_MAP_UNSYNCHRONIZED;
      }
   }
   /* Use a staging buffer in cached GTT for reads. */
   else if (((usage & PIPE_MAP_READ) && !(usage & PIPE_MAP_PERSISTENT) &&
             (buf->domains & RADEON_DOMAIN_VRAM || buf->flags & RADEON_FLAG_GTT_WC)) ||
            (buf->flags & RADEON_FLAG_SPARSE)) {
      struct si_resource *staging;

      assert(!(usage & (TC_TRANSFER_MAP_THREADED_UNSYNC | PIPE_MAP_THREAD_SAFE)));
      staging = si_aligned_buffer_create(ctx->screen,
                                         SI_RESOURCE_FLAG_UNCACHED | SI_RESOURCE_FLAG_DRIVER_INTERNAL,
                                         PIPE_USAGE_STAGING,
                                         box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT), 256);
      if (staging) {
         /* Copy the VRAM buffer to the staging buffer. */
         si_copy_buffer(sctx, &staging->b.b, resource, box->x % SI_MAP_BUFFER_ALIGNMENT,
                        box->x, box->width, SI_OP_SYNC_BEFORE_AFTER);

         data = si_buffer_map(sctx, staging, usage & ~PIPE_MAP_UNSYNCHRONIZED);
         if (!data) {
            si_resource_reference(&staging, NULL);
            return NULL;
         }
         data += box->x % SI_MAP_BUFFER_ALIGNMENT;

         return si_buffer_get_transfer(ctx, resource, usage, box, ptransfer, data, staging, 0);
      } else if (buf->flags & RADEON_FLAG_SPARSE) {
         return NULL;
      }
   }

   data = si_buffer_map(sctx, buf, usage);
   if (!data) {
      return NULL;
   }
   data += box->x;

   return si_buffer_get_transfer(ctx, resource, usage, box, ptransfer, data, NULL, 0);
}
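
/* Usage sketch (illustrative, not compiled): the transfer path above as seen
 * through the u_inlines.h helpers. A write-only, range-discarding map is the
 * cheap case because it can take the unsynchronized or upload-staging
 * branches instead of waiting for the GPU:
 *
 *    struct pipe_transfer *xfer;
 *    void *ptr = pipe_buffer_map_range(ctx, buf, offset, size,
 *                                      PIPE_MAP_WRITE | PIPE_MAP_DISCARD_RANGE,
 *                                      &xfer);
 *    if (ptr) {
 *       memcpy(ptr, data, size);
 *       pipe_buffer_unmap(ctx, xfer);
 *    }
 */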

static void si_buffer_do_flush_region(struct pipe_context *ctx, struct pipe_transfer *transfer,
                                      const struct pipe_box *box)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_transfer *stransfer = (struct si_transfer *)transfer;
   struct si_resource *buf = si_resource(transfer->resource);

   if (stransfer->staging) {
      unsigned src_offset =
         stransfer->b.b.offset + transfer->box.x % SI_MAP_BUFFER_ALIGNMENT + (box->x - transfer->box.x);

      /* Copy the staging buffer into the original one. */
      si_copy_buffer(sctx, transfer->resource, &stransfer->staging->b.b, box->x, src_offset,
                     box->width, SI_OP_SYNC_BEFORE_AFTER);
   }

   util_range_add(&buf->b.b, &buf->valid_buffer_range, box->x, box->x + box->width);
}

static void si_buffer_flush_region(struct pipe_context *ctx, struct pipe_transfer *transfer,
                                   const struct pipe_box *rel_box)
{
   unsigned required_usage = PIPE_MAP_WRITE | PIPE_MAP_FLUSH_EXPLICIT;

   if ((transfer->usage & required_usage) == required_usage) {
      struct pipe_box box;

      u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
      si_buffer_do_flush_region(ctx, transfer, &box);
   }
}

static void si_buffer_transfer_unmap(struct pipe_context *ctx, struct pipe_transfer *transfer)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_transfer *stransfer = (struct si_transfer *)transfer;

   if (transfer->usage & PIPE_MAP_WRITE && !(transfer->usage & PIPE_MAP_FLUSH_EXPLICIT))
      si_buffer_do_flush_region(ctx, transfer, &transfer->box);

   if (transfer->usage & (PIPE_MAP_ONCE | RADEON_MAP_TEMPORARY) &&
       !stransfer->staging)
      sctx->ws->buffer_unmap(sctx->ws, si_resource(stransfer->b.b.resource)->buf);

   si_resource_reference(&stransfer->staging, NULL);
   assert(stransfer->b.staging == NULL); /* for threaded context only */
   pipe_resource_reference(&transfer->resource, NULL);

   if (transfer->usage & PIPE_MAP_THREAD_SAFE) {
      free(transfer);
   } else {
      /* Don't use pool_transfers_unsync. We are always in the driver
       * thread. Freeing an object into a different pool is allowed.
       */
      slab_free(&sctx->pool_transfers, transfer);
   }
}
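
/* Usage sketch (illustrative, not compiled): with PIPE_MAP_FLUSH_EXPLICIT,
 * only ranges flagged via transfer_flush_region() are copied back by
 * si_buffer_do_flush_region(); the implicit copy in unmap is skipped. The
 * box is relative to the mapped range:
 *
 *    struct pipe_box dirty;
 *    u_box_1d(0, dirty_size, &dirty);
 *    ctx->transfer_flush_region(ctx, xfer, &dirty);
 *    pipe_buffer_unmap(ctx, xfer);
 */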

static void si_buffer_subdata(struct pipe_context *ctx, struct pipe_resource *buffer,
                              unsigned usage, unsigned offset, unsigned size, const void *data)
{
   struct pipe_transfer *transfer = NULL;
   struct pipe_box box;
   uint8_t *map = NULL;

   usage |= PIPE_MAP_WRITE;

   if (!(usage & PIPE_MAP_DIRECTLY))
      usage |= PIPE_MAP_DISCARD_RANGE;

   u_box_1d(offset, size, &box);
   map = si_buffer_transfer_map(ctx, buffer, 0, usage, &box, &transfer);
   if (!map)
      return;

   memcpy(map, data, size);
   si_buffer_transfer_unmap(ctx, transfer);
}
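
/* Usage sketch (illustrative, not compiled): small uploads usually arrive
 * here via pipe_buffer_write() from u_inlines.h rather than an explicit map:
 *
 *    uint32_t value = 42;
 *    pipe_buffer_write(ctx, buf, offset, sizeof(value), &value);
 */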

static struct si_resource *si_alloc_buffer_struct(struct pipe_screen *screen,
                                                  const struct pipe_resource *templ)
{
   struct si_resource *buf = MALLOC_STRUCT_CL(si_resource);

   buf->b.b = *templ;
   buf->b.b.next = NULL;
   pipe_reference_init(&buf->b.b.reference, 1);
   buf->b.b.screen = screen;

   threaded_resource_init(&buf->b.b);

   buf->buf = NULL;
   buf->bind_history = 0;
   buf->TC_L2_dirty = false;
   util_range_init(&buf->valid_buffer_range);
   return buf;
}

static struct pipe_resource *si_buffer_create(struct pipe_screen *screen,
                                              const struct pipe_resource *templ, unsigned alignment)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   struct si_resource *buf = si_alloc_buffer_struct(screen, templ);

   if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
      buf->b.b.flags |= SI_RESOURCE_FLAG_UNMAPPABLE;

   si_init_resource_fields(sscreen, buf, templ->width0, alignment);

   if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
      buf->flags |= RADEON_FLAG_SPARSE;

   if (!si_alloc_resource(sscreen, buf)) {
      threaded_resource_deinit(&buf->b.b);
      FREE_CL(buf);
      return NULL;
   }

   buf->b.buffer_id_unique = util_idalloc_mt_alloc(&sscreen->buffer_ids);
   return &buf->b.b;
}

struct pipe_resource *pipe_aligned_buffer_create(struct pipe_screen *screen, unsigned flags,
                                                 unsigned usage, unsigned size, unsigned alignment)
{
   struct pipe_resource buffer;

   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM;
   buffer.bind = 0;
   buffer.usage = usage;
   buffer.flags = flags;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return si_buffer_create(screen, &buffer, alignment);
}

struct si_resource *si_aligned_buffer_create(struct pipe_screen *screen, unsigned flags,
                                             unsigned usage, unsigned size, unsigned alignment)
{
   return si_resource(pipe_aligned_buffer_create(screen, flags, usage, size, alignment));
}
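
/* Usage sketch (illustrative, not compiled): how the driver allocates an
 * internal staging buffer with the helper above; the 64 KiB size is an
 * arbitrary example value:
 *
 *    struct si_resource *staging =
 *       si_aligned_buffer_create(screen, SI_RESOURCE_FLAG_DRIVER_INTERNAL,
 *                                PIPE_USAGE_STAGING, 64 * 1024, 256);
 *    if (!staging)
 *       return; // allocation failed
 */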

static struct pipe_resource *si_buffer_from_user_memory(struct pipe_screen *screen,
                                                        const struct pipe_resource *templ,
                                                        void *user_memory)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   struct radeon_winsys *ws = sscreen->ws;
   struct si_resource *buf = si_alloc_buffer_struct(screen, templ);

   buf->domains = RADEON_DOMAIN_GTT;
   buf->flags = 0;
   buf->b.is_user_ptr = true;
   util_range_add(&buf->b.b, &buf->valid_buffer_range, 0, templ->width0);
   util_range_add(&buf->b.b, &buf->b.valid_buffer_range, 0, templ->width0);

   /* Convert a user pointer to a buffer. */
   buf->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
   if (!buf->buf) {
      threaded_resource_deinit(&buf->b.b);
      FREE_CL(buf);
      return NULL;
   }

   buf->gpu_address = ws->buffer_get_virtual_address(buf->buf);
   buf->memory_usage_kb = templ->width0 / 1024;
   buf->b.buffer_id_unique = util_idalloc_mt_alloc(&sscreen->buffer_ids);
   return &buf->b.b;
}
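
/* Usage sketch (illustrative, not compiled): GL_AMD_pinned_memory style
 * import. "user_ptr" is assumed to be page-aligned and to stay valid for
 * the buffer's lifetime, since the winsys pins it rather than copying:
 *
 *    struct pipe_resource templ = {0};
 *    templ.target = PIPE_BUFFER;
 *    templ.format = PIPE_FORMAT_R8_UNORM;
 *    templ.usage = PIPE_USAGE_DEFAULT;
 *    templ.width0 = size;
 *    templ.height0 = templ.depth0 = templ.array_size = 1;
 *
 *    struct pipe_resource *buf =
 *       screen->resource_from_user_memory(screen, &templ, user_ptr);
 */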

struct pipe_resource *si_buffer_from_winsys_buffer(struct pipe_screen *screen,
                                                   const struct pipe_resource *templ,
                                                   struct pb_buffer *imported_buf,
                                                   bool dedicated)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   struct si_resource *res = si_alloc_buffer_struct(screen, templ);

   if (!res)
      return NULL;

   res->buf = imported_buf;
   res->gpu_address = sscreen->ws->buffer_get_virtual_address(res->buf);
   res->bo_size = imported_buf->size;
   res->bo_alignment_log2 = imported_buf->alignment_log2;
   res->domains = sscreen->ws->buffer_get_initial_domain(res->buf);

   res->memory_usage_kb = MAX2(1, res->bo_size / 1024);

   if (sscreen->ws->buffer_get_flags)
      res->flags = sscreen->ws->buffer_get_flags(res->buf);

   if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE) {
      res->b.b.flags |= SI_RESOURCE_FLAG_UNMAPPABLE;
      res->flags |= RADEON_FLAG_SPARSE;
   }

   res->b.buffer_id_unique = util_idalloc_mt_alloc(&sscreen->buffer_ids);
   return &res->b.b;
}

static struct pipe_resource *si_resource_create(struct pipe_screen *screen,
                                                const struct pipe_resource *templ)
{
   if (templ->target == PIPE_BUFFER) {
      return si_buffer_create(screen, templ, 256);
   } else {
      return si_texture_create(screen, templ);
   }
}

static bool si_resource_commit(struct pipe_context *pctx, struct pipe_resource *resource,
                               unsigned level, struct pipe_box *box, bool commit)
{
   struct si_context *ctx = (struct si_context *)pctx;
   struct si_resource *res = si_resource(resource);

   /*
    * Since buffer commitment changes cannot be pipelined, we need to
    * (a) flush any pending commands that refer to the buffer we're about
    *     to change, and
    * (b) wait for threaded submit to finish, including those that were
    *     triggered by some other, earlier operation.
    */
   if (radeon_emitted(&ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
       ctx->ws->cs_is_buffer_referenced(&ctx->gfx_cs, res->buf, RADEON_USAGE_READWRITE)) {
      si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
   }
   ctx->ws->cs_sync_flush(&ctx->gfx_cs);

   assert(resource->target == PIPE_BUFFER);

   return ctx->ws->buffer_commit(ctx->ws, res->buf, box->x, box->width, commit);
}

void si_init_screen_buffer_functions(struct si_screen *sscreen)
{
   sscreen->b.resource_create = si_resource_create;
   sscreen->b.resource_destroy = si_resource_destroy;
   sscreen->b.resource_from_user_memory = si_buffer_from_user_memory;
}

void si_init_buffer_functions(struct si_context *sctx)
{
   sctx->b.invalidate_resource = si_invalidate_resource;
   sctx->b.buffer_map = si_buffer_transfer_map;
   sctx->b.transfer_flush_region = si_buffer_flush_region;
   sctx->b.buffer_unmap = si_buffer_transfer_unmap;
   sctx->b.texture_subdata = u_default_texture_subdata;
   sctx->b.buffer_subdata = si_buffer_subdata;
   sctx->b.resource_commit = si_resource_commit;
}
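
/* Usage sketch (illustrative, not compiled): committing and decommitting
 * physical pages of a sparse buffer through the resource_commit hook
 * installed above. Offsets and sizes are assumed to be aligned to the
 * sparse page size:
 *
 *    struct pipe_box box;
 *    u_box_1d(page_offset, page_size, &box);
 *    ctx->resource_commit(ctx, sparse_buf, 0, &box, true);   // commit
 *    ctx->resource_commit(ctx, sparse_buf, 0, &box, false);  // decommit
 */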