1/************************************************************************** 2 * 3 * Copyright 2007 VMware, Inc. 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR 22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * 26 **************************************************************************/ 27 28 29/** 30 * Functions for pixel buffer objects and vertex/element buffer objects. 31 */ 32 33 34#include <inttypes.h> /* for PRId64 macro */ 35 36#include "main/errors.h" 37#include "main/imports.h" 38#include "main/mtypes.h" 39#include "main/arrayobj.h" 40#include "main/bufferobj.h" 41 42#include "st_context.h" 43#include "st_cb_bufferobjects.h" 44#include "st_cb_memoryobjects.h" 45#include "st_debug.h" 46#include "st_util.h" 47 48#include "pipe/p_context.h" 49#include "pipe/p_defines.h" 50#include "util/u_inlines.h" 51 52 53/** 54 * There is some duplication between mesa's bufferobjects and our 55 * bufmgr buffers. Both have an integer handle and a hashtable to 56 * lookup an opaque structure. It would be nice if the handles and 57 * internal structure where somehow shared. 58 */ 59static struct gl_buffer_object * 60st_bufferobj_alloc(struct gl_context *ctx, GLuint name) 61{ 62 struct st_buffer_object *st_obj = ST_CALLOC_STRUCT(st_buffer_object); 63 64 if (!st_obj) 65 return NULL; 66 67 _mesa_initialize_buffer_object(ctx, &st_obj->Base, name); 68 69 return &st_obj->Base; 70} 71 72 73 74/** 75 * Deallocate/free a vertex/pixel buffer object. 76 * Called via glDeleteBuffersARB(). 77 */ 78static void 79st_bufferobj_free(struct gl_context *ctx, struct gl_buffer_object *obj) 80{ 81 struct st_buffer_object *st_obj = st_buffer_object(obj); 82 83 assert(obj->RefCount == 0); 84 _mesa_buffer_unmap_all_mappings(ctx, obj); 85 86 if (st_obj->buffer) 87 pipe_resource_reference(&st_obj->buffer, NULL); 88 89 _mesa_delete_buffer_object(ctx, obj); 90} 91 92 93 94/** 95 * Replace data in a subrange of buffer object. If the data range 96 * specified by size + offset extends beyond the end of the buffer or 97 * if data is NULL, no copy is performed. 98 * Called via glBufferSubDataARB(). 99 */ 100static void 101st_bufferobj_subdata(struct gl_context *ctx, 102 GLintptrARB offset, 103 GLsizeiptrARB size, 104 const void * data, struct gl_buffer_object *obj) 105{ 106 struct st_buffer_object *st_obj = st_buffer_object(obj); 107 108 /* we may be called from VBO code, so double-check params here */ 109 assert(offset >= 0); 110 assert(size >= 0); 111 assert(offset + size <= obj->Size); 112 113 if (!size) 114 return; 115 116 /* 117 * According to ARB_vertex_buffer_object specification, if data is null, 118 * then the contents of the buffer object's data store is undefined. We just 119 * ignore, and leave it unchanged. 120 */ 121 if (!data) 122 return; 123 124 if (!st_obj->buffer) { 125 /* we probably ran out of memory during buffer allocation */ 126 return; 127 } 128 129 /* Now that transfers are per-context, we don't have to figure out 130 * flushing here. Usually drivers won't need to flush in this case 131 * even if the buffer is currently referenced by hardware - they 132 * just queue the upload as dma rather than mapping the underlying 133 * buffer directly. 134 */ 135 pipe_buffer_write(st_context(ctx)->pipe, 136 st_obj->buffer, 137 offset, size, data); 138} 139 140 141/** 142 * Called via glGetBufferSubDataARB(). 143 */ 144static void 145st_bufferobj_get_subdata(struct gl_context *ctx, 146 GLintptrARB offset, 147 GLsizeiptrARB size, 148 void * data, struct gl_buffer_object *obj) 149{ 150 struct st_buffer_object *st_obj = st_buffer_object(obj); 151 152 /* we may be called from VBO code, so double-check params here */ 153 assert(offset >= 0); 154 assert(size >= 0); 155 assert(offset + size <= obj->Size); 156 157 if (!size) 158 return; 159 160 if (!st_obj->buffer) { 161 /* we probably ran out of memory during buffer allocation */ 162 return; 163 } 164 165 pipe_buffer_read(st_context(ctx)->pipe, st_obj->buffer, 166 offset, size, data); 167} 168 169 170/** 171 * Return bitmask of PIPE_BIND_x flags corresponding a GL buffer target. 172 */ 173static unsigned 174buffer_target_to_bind_flags(GLenum target) 175{ 176 switch (target) { 177 case GL_PIXEL_PACK_BUFFER_ARB: 178 case GL_PIXEL_UNPACK_BUFFER_ARB: 179 return PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW; 180 case GL_ARRAY_BUFFER_ARB: 181 return PIPE_BIND_VERTEX_BUFFER; 182 case GL_ELEMENT_ARRAY_BUFFER_ARB: 183 return PIPE_BIND_INDEX_BUFFER; 184 case GL_TEXTURE_BUFFER: 185 return PIPE_BIND_SAMPLER_VIEW; 186 case GL_TRANSFORM_FEEDBACK_BUFFER: 187 return PIPE_BIND_STREAM_OUTPUT; 188 case GL_UNIFORM_BUFFER: 189 return PIPE_BIND_CONSTANT_BUFFER; 190 case GL_DRAW_INDIRECT_BUFFER: 191 case GL_PARAMETER_BUFFER_ARB: 192 return PIPE_BIND_COMMAND_ARGS_BUFFER; 193 case GL_ATOMIC_COUNTER_BUFFER: 194 case GL_SHADER_STORAGE_BUFFER: 195 return PIPE_BIND_SHADER_BUFFER; 196 case GL_QUERY_BUFFER: 197 return PIPE_BIND_QUERY_BUFFER; 198 default: 199 return 0; 200 } 201} 202 203 204/** 205 * Return bitmask of PIPE_RESOURCE_x flags corresponding to GL_MAP_x flags. 206 */ 207static unsigned 208storage_flags_to_buffer_flags(GLbitfield storageFlags) 209{ 210 unsigned flags = 0; 211 if (storageFlags & GL_MAP_PERSISTENT_BIT) 212 flags |= PIPE_RESOURCE_FLAG_MAP_PERSISTENT; 213 if (storageFlags & GL_MAP_COHERENT_BIT) 214 flags |= PIPE_RESOURCE_FLAG_MAP_COHERENT; 215 if (storageFlags & GL_SPARSE_STORAGE_BIT_ARB) 216 flags |= PIPE_RESOURCE_FLAG_SPARSE; 217 return flags; 218} 219 220 221/** 222 * From a buffer object's target, immutability flag, storage flags and 223 * usage hint, return a pipe_resource_usage value (PIPE_USAGE_DYNAMIC, 224 * STREAM, etc). 225 */ 226static enum pipe_resource_usage 227buffer_usage(GLenum target, GLboolean immutable, 228 GLbitfield storageFlags, GLenum usage) 229{ 230 if (immutable) { 231 /* BufferStorage */ 232 if (storageFlags & GL_CLIENT_STORAGE_BIT) { 233 if (storageFlags & GL_MAP_READ_BIT) 234 return PIPE_USAGE_STAGING; 235 else 236 return PIPE_USAGE_STREAM; 237 } else { 238 return PIPE_USAGE_DEFAULT; 239 } 240 } 241 else { 242 /* BufferData */ 243 switch (usage) { 244 case GL_DYNAMIC_DRAW: 245 case GL_DYNAMIC_COPY: 246 return PIPE_USAGE_DYNAMIC; 247 case GL_STREAM_DRAW: 248 case GL_STREAM_COPY: 249 /* XXX: Remove this test and fall-through when we have PBO unpacking 250 * acceleration. Right now, PBO unpacking is done by the CPU, so we 251 * have to make sure CPU reads are fast. 252 */ 253 if (target != GL_PIXEL_UNPACK_BUFFER_ARB) { 254 return PIPE_USAGE_STREAM; 255 } 256 /* fall through */ 257 case GL_STATIC_READ: 258 case GL_DYNAMIC_READ: 259 case GL_STREAM_READ: 260 return PIPE_USAGE_STAGING; 261 case GL_STATIC_DRAW: 262 case GL_STATIC_COPY: 263 default: 264 return PIPE_USAGE_DEFAULT; 265 } 266 } 267} 268 269 270static ALWAYS_INLINE GLboolean 271bufferobj_data(struct gl_context *ctx, 272 GLenum target, 273 GLsizeiptrARB size, 274 const void *data, 275 struct gl_memory_object *memObj, 276 GLuint64 offset, 277 GLenum usage, 278 GLbitfield storageFlags, 279 struct gl_buffer_object *obj) 280{ 281 struct st_context *st = st_context(ctx); 282 struct pipe_context *pipe = st->pipe; 283 struct pipe_screen *screen = pipe->screen; 284 struct st_buffer_object *st_obj = st_buffer_object(obj); 285 struct st_memory_object *st_mem_obj = st_memory_object(memObj); 286 287 if (target != GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD && 288 size && st_obj->buffer && 289 st_obj->Base.Size == size && 290 st_obj->Base.Usage == usage && 291 st_obj->Base.StorageFlags == storageFlags) { 292 if (data) { 293 /* Just discard the old contents and write new data. 294 * This should be the same as creating a new buffer, but we avoid 295 * a lot of validation in Mesa. 296 */ 297 pipe->buffer_subdata(pipe, st_obj->buffer, 298 PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE, 299 0, size, data); 300 return GL_TRUE; 301 } else if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) { 302 pipe->invalidate_resource(pipe, st_obj->buffer); 303 return GL_TRUE; 304 } 305 } 306 307 st_obj->Base.Size = size; 308 st_obj->Base.Usage = usage; 309 st_obj->Base.StorageFlags = storageFlags; 310 311 pipe_resource_reference( &st_obj->buffer, NULL ); 312 313 const unsigned bindings = buffer_target_to_bind_flags(target); 314 315 if (ST_DEBUG & DEBUG_BUFFER) { 316 debug_printf("Create buffer size %" PRId64 " bind 0x%x\n", 317 (int64_t) size, bindings); 318 } 319 320 if (size != 0) { 321 struct pipe_resource buffer; 322 323 memset(&buffer, 0, sizeof buffer); 324 buffer.target = PIPE_BUFFER; 325 buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */ 326 buffer.bind = bindings; 327 buffer.usage = 328 buffer_usage(target, st_obj->Base.Immutable, storageFlags, usage); 329 buffer.flags = storage_flags_to_buffer_flags(storageFlags); 330 buffer.width0 = size; 331 buffer.height0 = 1; 332 buffer.depth0 = 1; 333 buffer.array_size = 1; 334 335 if (st_mem_obj) { 336 st_obj->buffer = screen->resource_from_memobj(screen, &buffer, 337 st_mem_obj->memory, 338 offset); 339 } 340 else if (target == GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD) { 341 st_obj->buffer = 342 screen->resource_from_user_memory(screen, &buffer, (void*)data); 343 } 344 else { 345 st_obj->buffer = screen->resource_create(screen, &buffer); 346 347 if (st_obj->buffer && data) 348 pipe_buffer_write(pipe, st_obj->buffer, 0, size, data); 349 } 350 351 if (!st_obj->buffer) { 352 /* out of memory */ 353 st_obj->Base.Size = 0; 354 return GL_FALSE; 355 } 356 } 357 358 /* The current buffer may be bound, so we have to revalidate all atoms that 359 * might be using it. 360 */ 361 if (st_obj->Base.UsageHistory & USAGE_ARRAY_BUFFER) 362 ctx->NewDriverState |= ST_NEW_VERTEX_ARRAYS; 363 /* if (st_obj->Base.UsageHistory & USAGE_ELEMENT_ARRAY_BUFFER) */ 364 /* ctx->NewDriverState |= TODO: Handle indices as gallium state; */ 365 if (st_obj->Base.UsageHistory & USAGE_UNIFORM_BUFFER) 366 ctx->NewDriverState |= ST_NEW_UNIFORM_BUFFER; 367 if (st_obj->Base.UsageHistory & USAGE_SHADER_STORAGE_BUFFER) 368 ctx->NewDriverState |= ST_NEW_STORAGE_BUFFER; 369 if (st_obj->Base.UsageHistory & USAGE_TEXTURE_BUFFER) 370 ctx->NewDriverState |= ST_NEW_SAMPLER_VIEWS | ST_NEW_IMAGE_UNITS; 371 if (st_obj->Base.UsageHistory & USAGE_ATOMIC_COUNTER_BUFFER) 372 ctx->NewDriverState |= ctx->DriverFlags.NewAtomicBuffer; 373 374 return GL_TRUE; 375} 376 377/** 378 * Allocate space for and store data in a buffer object. Any data that was 379 * previously stored in the buffer object is lost. If data is NULL, 380 * memory will be allocated, but no copy will occur. 381 * Called via ctx->Driver.BufferData(). 382 * \return GL_TRUE for success, GL_FALSE if out of memory 383 */ 384static GLboolean 385st_bufferobj_data(struct gl_context *ctx, 386 GLenum target, 387 GLsizeiptrARB size, 388 const void *data, 389 GLenum usage, 390 GLbitfield storageFlags, 391 struct gl_buffer_object *obj) 392{ 393 return bufferobj_data(ctx, target, size, data, NULL, 0, usage, storageFlags, obj); 394} 395 396static GLboolean 397st_bufferobj_data_mem(struct gl_context *ctx, 398 GLenum target, 399 GLsizeiptrARB size, 400 struct gl_memory_object *memObj, 401 GLuint64 offset, 402 GLenum usage, 403 struct gl_buffer_object *bufObj) 404{ 405 return bufferobj_data(ctx, target, size, NULL, memObj, offset, usage, 0, bufObj); 406} 407 408/** 409 * Called via glInvalidateBuffer(Sub)Data. 410 */ 411static void 412st_bufferobj_invalidate(struct gl_context *ctx, 413 struct gl_buffer_object *obj, 414 GLintptr offset, 415 GLsizeiptr size) 416{ 417 struct st_context *st = st_context(ctx); 418 struct pipe_context *pipe = st->pipe; 419 struct st_buffer_object *st_obj = st_buffer_object(obj); 420 421 /* We ignore partial invalidates. */ 422 if (offset != 0 || size != obj->Size) 423 return; 424 425 /* Nothing to invalidate. */ 426 if (!st_obj->buffer) 427 return; 428 429 pipe->invalidate_resource(pipe, st_obj->buffer); 430} 431 432 433/** 434 * Convert GLbitfield of GL_MAP_x flags to gallium pipe_transfer_usage flags. 435 * \param wholeBuffer is the whole buffer being mapped? 436 */ 437enum pipe_transfer_usage 438st_access_flags_to_transfer_flags(GLbitfield access, bool wholeBuffer) 439{ 440 enum pipe_transfer_usage flags = 0; 441 442 if (access & GL_MAP_WRITE_BIT) 443 flags |= PIPE_TRANSFER_WRITE; 444 445 if (access & GL_MAP_READ_BIT) 446 flags |= PIPE_TRANSFER_READ; 447 448 if (access & GL_MAP_FLUSH_EXPLICIT_BIT) 449 flags |= PIPE_TRANSFER_FLUSH_EXPLICIT; 450 451 if (access & GL_MAP_INVALIDATE_BUFFER_BIT) { 452 flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE; 453 } 454 else if (access & GL_MAP_INVALIDATE_RANGE_BIT) { 455 if (wholeBuffer) 456 flags |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE; 457 else 458 flags |= PIPE_TRANSFER_DISCARD_RANGE; 459 } 460 461 if (access & GL_MAP_UNSYNCHRONIZED_BIT) 462 flags |= PIPE_TRANSFER_UNSYNCHRONIZED; 463 464 if (access & GL_MAP_PERSISTENT_BIT) 465 flags |= PIPE_TRANSFER_PERSISTENT; 466 467 if (access & GL_MAP_COHERENT_BIT) 468 flags |= PIPE_TRANSFER_COHERENT; 469 470 /* ... other flags ... 471 */ 472 473 if (access & MESA_MAP_NOWAIT_BIT) 474 flags |= PIPE_TRANSFER_DONTBLOCK; 475 476 return flags; 477} 478 479 480/** 481 * Called via glMapBufferRange(). 482 */ 483static void * 484st_bufferobj_map_range(struct gl_context *ctx, 485 GLintptr offset, GLsizeiptr length, GLbitfield access, 486 struct gl_buffer_object *obj, 487 gl_map_buffer_index index) 488{ 489 struct pipe_context *pipe = st_context(ctx)->pipe; 490 struct st_buffer_object *st_obj = st_buffer_object(obj); 491 492 assert(offset >= 0); 493 assert(length >= 0); 494 assert(offset < obj->Size); 495 assert(offset + length <= obj->Size); 496 497 const enum pipe_transfer_usage transfer_flags = 498 st_access_flags_to_transfer_flags(access, 499 offset == 0 && length == obj->Size); 500 501 obj->Mappings[index].Pointer = pipe_buffer_map_range(pipe, 502 st_obj->buffer, 503 offset, length, 504 transfer_flags, 505 &st_obj->transfer[index]); 506 if (obj->Mappings[index].Pointer) { 507 obj->Mappings[index].Offset = offset; 508 obj->Mappings[index].Length = length; 509 obj->Mappings[index].AccessFlags = access; 510 } 511 else { 512 st_obj->transfer[index] = NULL; 513 } 514 515 return obj->Mappings[index].Pointer; 516} 517 518 519static void 520st_bufferobj_flush_mapped_range(struct gl_context *ctx, 521 GLintptr offset, GLsizeiptr length, 522 struct gl_buffer_object *obj, 523 gl_map_buffer_index index) 524{ 525 struct pipe_context *pipe = st_context(ctx)->pipe; 526 struct st_buffer_object *st_obj = st_buffer_object(obj); 527 528 /* Subrange is relative to mapped range */ 529 assert(offset >= 0); 530 assert(length >= 0); 531 assert(offset + length <= obj->Mappings[index].Length); 532 assert(obj->Mappings[index].Pointer); 533 534 if (!length) 535 return; 536 537 pipe_buffer_flush_mapped_range(pipe, st_obj->transfer[index], 538 obj->Mappings[index].Offset + offset, 539 length); 540} 541 542 543/** 544 * Called via glUnmapBufferARB(). 545 */ 546static GLboolean 547st_bufferobj_unmap(struct gl_context *ctx, struct gl_buffer_object *obj, 548 gl_map_buffer_index index) 549{ 550 struct pipe_context *pipe = st_context(ctx)->pipe; 551 struct st_buffer_object *st_obj = st_buffer_object(obj); 552 553 if (obj->Mappings[index].Length) 554 pipe_buffer_unmap(pipe, st_obj->transfer[index]); 555 556 st_obj->transfer[index] = NULL; 557 obj->Mappings[index].Pointer = NULL; 558 obj->Mappings[index].Offset = 0; 559 obj->Mappings[index].Length = 0; 560 return GL_TRUE; 561} 562 563 564/** 565 * Called via glCopyBufferSubData(). 566 */ 567static void 568st_copy_buffer_subdata(struct gl_context *ctx, 569 struct gl_buffer_object *src, 570 struct gl_buffer_object *dst, 571 GLintptr readOffset, GLintptr writeOffset, 572 GLsizeiptr size) 573{ 574 struct pipe_context *pipe = st_context(ctx)->pipe; 575 struct st_buffer_object *srcObj = st_buffer_object(src); 576 struct st_buffer_object *dstObj = st_buffer_object(dst); 577 struct pipe_box box; 578 579 if (!size) 580 return; 581 582 /* buffer should not already be mapped */ 583 assert(!_mesa_check_disallowed_mapping(src)); 584 assert(!_mesa_check_disallowed_mapping(dst)); 585 586 u_box_1d(readOffset, size, &box); 587 588 pipe->resource_copy_region(pipe, dstObj->buffer, 0, writeOffset, 0, 0, 589 srcObj->buffer, 0, &box); 590} 591 592/** 593 * Called via glClearBufferSubData(). 594 */ 595static void 596st_clear_buffer_subdata(struct gl_context *ctx, 597 GLintptr offset, GLsizeiptr size, 598 const void *clearValue, 599 GLsizeiptr clearValueSize, 600 struct gl_buffer_object *bufObj) 601{ 602 struct pipe_context *pipe = st_context(ctx)->pipe; 603 struct st_buffer_object *buf = st_buffer_object(bufObj); 604 static const char zeros[16] = {0}; 605 606 if (!pipe->clear_buffer) { 607 _mesa_ClearBufferSubData_sw(ctx, offset, size, 608 clearValue, clearValueSize, bufObj); 609 return; 610 } 611 612 if (!clearValue) 613 clearValue = zeros; 614 615 pipe->clear_buffer(pipe, buf->buffer, offset, size, 616 clearValue, clearValueSize); 617} 618 619static void 620st_bufferobj_page_commitment(struct gl_context *ctx, 621 struct gl_buffer_object *bufferObj, 622 GLintptr offset, GLsizeiptr size, 623 GLboolean commit) 624{ 625 struct pipe_context *pipe = st_context(ctx)->pipe; 626 struct st_buffer_object *buf = st_buffer_object(bufferObj); 627 struct pipe_box box; 628 629 u_box_1d(offset, size, &box); 630 631 if (!pipe->resource_commit(pipe, buf->buffer, 0, &box, commit)) { 632 _mesa_error(ctx, GL_OUT_OF_MEMORY, "glBufferPageCommitmentARB(out of memory)"); 633 return; 634 } 635} 636 637void 638st_init_bufferobject_functions(struct pipe_screen *screen, 639 struct dd_function_table *functions) 640{ 641 functions->NewBufferObject = st_bufferobj_alloc; 642 functions->DeleteBuffer = st_bufferobj_free; 643 functions->BufferData = st_bufferobj_data; 644 functions->BufferDataMem = st_bufferobj_data_mem; 645 functions->BufferSubData = st_bufferobj_subdata; 646 functions->GetBufferSubData = st_bufferobj_get_subdata; 647 functions->MapBufferRange = st_bufferobj_map_range; 648 functions->FlushMappedBufferRange = st_bufferobj_flush_mapped_range; 649 functions->UnmapBuffer = st_bufferobj_unmap; 650 functions->CopyBufferSubData = st_copy_buffer_subdata; 651 functions->ClearBufferSubData = st_clear_buffer_subdata; 652 functions->BufferPageCommitment = st_bufferobj_page_commitment; 653 654 if (screen->get_param(screen, PIPE_CAP_INVALIDATE_BUFFER)) 655 functions->InvalidateBufferSubData = st_bufferobj_invalidate; 656} 657