glthread_draw.c revision 7ec681f3
/*
 * Copyright © 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/* Draw function marshalling for glthread.
 *
 * The purpose of these glDraw wrappers is to upload non-VBO vertex and
 * index data, so that glthread doesn't have to execute synchronously.
 */

#include "c99_alloca.h"

#include "main/glthread_marshal.h"
#include "main/dispatch.h"
#include "main/varray.h"

static inline unsigned
get_index_size(GLenum type)
{
   /* GL_UNSIGNED_BYTE  - GL_UNSIGNED_BYTE = 0
    * GL_UNSIGNED_SHORT - GL_UNSIGNED_BYTE = 2
    * GL_UNSIGNED_INT   - GL_UNSIGNED_BYTE = 4
    *
    * Divide by 2 to get n=0,1,2, then the index size is: 1 << n
    */
   return 1 << ((type - GL_UNSIGNED_BYTE) >> 1);
}

static inline bool
is_index_type_valid(GLenum type)
{
   /* GL_UNSIGNED_BYTE  = 0x1401
    * GL_UNSIGNED_SHORT = 0x1403
    * GL_UNSIGNED_INT   = 0x1405
    *
    * The trick is that bit 1 and bit 2 mean USHORT and UINT, respectively.
    * After clearing those two bits (with ~6), we should get UBYTE.
    * Both bits can't be set, because the enum would be greater than UINT.
    */
   return type <= GL_UNSIGNED_INT && (type & ~6) == GL_UNSIGNED_BYTE;
}
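
/* Worked examples of the two tricks above, using the actual enum values:
 *    get_index_size(GL_UNSIGNED_SHORT) = 1 << ((0x1403 - 0x1401) >> 1) = 2
 *    get_index_size(GL_UNSIGNED_INT)   = 1 << ((0x1405 - 0x1401) >> 1) = 4
 *    is_index_type_valid(GL_UNSIGNED_INT): 0x1405 & ~6 = 0x1401 -> true
 *    is_index_type_valid(GL_SHORT):        0x1402 & ~6 = 0x1400 -> false
 */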

static ALWAYS_INLINE struct gl_buffer_object *
upload_indices(struct gl_context *ctx, unsigned count, unsigned index_size,
               const GLvoid **indices)
{
   struct gl_buffer_object *upload_buffer = NULL;
   unsigned upload_offset = 0;

   assert(count);

   _mesa_glthread_upload(ctx, *indices, index_size * count,
                         &upload_offset, &upload_buffer, NULL);
   assert(upload_buffer);
   *indices = (const GLvoid*)(intptr_t)upload_offset;

   return upload_buffer;
}

static ALWAYS_INLINE struct gl_buffer_object *
upload_multi_indices(struct gl_context *ctx, unsigned total_count,
                     unsigned index_size, unsigned draw_count,
                     const GLsizei *count, const GLvoid *const *indices,
                     const GLvoid **out_indices)
{
   struct gl_buffer_object *upload_buffer = NULL;
   unsigned upload_offset = 0;
   uint8_t *upload_ptr = NULL;

   assert(total_count);

   _mesa_glthread_upload(ctx, NULL, index_size * total_count,
                         &upload_offset, &upload_buffer, &upload_ptr);
   assert(upload_buffer);

   for (unsigned i = 0, offset = 0; i < draw_count; i++) {
      if (count[i] == 0)
         continue;

      unsigned size = count[i] * index_size;

      memcpy(upload_ptr + offset, indices[i], size);
      out_indices[i] = (const GLvoid*)(intptr_t)(upload_offset + offset);
      offset += size;
   }

   return upload_buffer;
}

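/* Upload the user (non-VBO) vertex arrays selected by user_buffer_mask for
 * the given vertex and instance ranges, and fill "buffers" with the upload
 * buffer, offset, and original pointer of each attrib binding, so that the
 * unmarshal code can bind the uploads for the draw and restore the original
 * bindings afterwards.
 */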
static ALWAYS_INLINE bool
upload_vertices(struct gl_context *ctx, unsigned user_buffer_mask,
                unsigned start_vertex, unsigned num_vertices,
                unsigned start_instance, unsigned num_instances,
                struct glthread_attrib_binding *buffers)
{
   struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
   unsigned attrib_mask_iter = vao->Enabled;
   unsigned num_buffers = 0;

   assert((num_vertices || !(user_buffer_mask & ~vao->NonZeroDivisorMask)) &&
          (num_instances || !(user_buffer_mask & vao->NonZeroDivisorMask)));

   if (unlikely(vao->BufferInterleaved & user_buffer_mask)) {
      /* Slower upload path where some buffers reference multiple attribs,
       * so we have to use 2 while loops instead of 1.
       */
      unsigned start_offset[VERT_ATTRIB_MAX];
      unsigned end_offset[VERT_ATTRIB_MAX];
      uint32_t buffer_mask = 0;

      while (attrib_mask_iter) {
         unsigned i = u_bit_scan(&attrib_mask_iter);
         unsigned binding_index = vao->Attrib[i].BufferIndex;

         if (!(user_buffer_mask & (1 << binding_index)))
            continue;

         unsigned stride = vao->Attrib[binding_index].Stride;
         unsigned instance_div = vao->Attrib[binding_index].Divisor;
         unsigned element_size = vao->Attrib[i].ElementSize;
         unsigned offset = vao->Attrib[i].RelativeOffset;
         unsigned size;

         if (instance_div) {
            /* Per-instance attrib. */

            /* Figure out how many instances we'll render given instance_div. We
             * can't use the typical div_round_up() pattern because the CTS uses
             * instance_div = ~0 for a test, which overflows div_round_up()'s
             * addition.
             */
            unsigned count = num_instances / instance_div;
            if (count * instance_div != num_instances)
               count++;

            offset += stride * start_instance;
            size = stride * (count - 1) + element_size;
         } else {
            /* Per-vertex attrib. */
            offset += stride * start_vertex;
            size = stride * (num_vertices - 1) + element_size;
         }

         unsigned binding_index_bit = 1u << binding_index;

         /* Update upload offsets. */
         if (!(buffer_mask & binding_index_bit)) {
            start_offset[binding_index] = offset;
            end_offset[binding_index] = offset + size;
         } else {
            if (offset < start_offset[binding_index])
               start_offset[binding_index] = offset;
            if (offset + size > end_offset[binding_index])
               end_offset[binding_index] = offset + size;
         }

         buffer_mask |= binding_index_bit;
      }

      /* Upload buffers. */
      while (buffer_mask) {
         struct gl_buffer_object *upload_buffer = NULL;
         unsigned upload_offset = 0;
         unsigned start, end;

         unsigned binding_index = u_bit_scan(&buffer_mask);

         start = start_offset[binding_index];
         end = end_offset[binding_index];
         assert(start < end);

         const void *ptr = vao->Attrib[binding_index].Pointer;
         _mesa_glthread_upload(ctx, (uint8_t*)ptr + start,
                               end - start, &upload_offset,
                               &upload_buffer, NULL);
         assert(upload_buffer);

         buffers[num_buffers].buffer = upload_buffer;
         buffers[num_buffers].offset = upload_offset - start;
         buffers[num_buffers].original_pointer = ptr;
         num_buffers++;
      }

      return true;
   }

   /* Faster path where all attribs are separate. */
   while (attrib_mask_iter) {
      unsigned i = u_bit_scan(&attrib_mask_iter);
      unsigned binding_index = vao->Attrib[i].BufferIndex;

      if (!(user_buffer_mask & (1 << binding_index)))
         continue;

      struct gl_buffer_object *upload_buffer = NULL;
      unsigned upload_offset = 0;
      unsigned stride = vao->Attrib[binding_index].Stride;
      unsigned instance_div = vao->Attrib[binding_index].Divisor;
      unsigned element_size = vao->Attrib[i].ElementSize;
      unsigned offset = vao->Attrib[i].RelativeOffset;
      unsigned size;

      if (instance_div) {
         /* Per-instance attrib. */

         /* Figure out how many instances we'll render given instance_div. We
          * can't use the typical div_round_up() pattern because the CTS uses
          * instance_div = ~0 for a test, which overflows div_round_up()'s
          * addition.
          */
         unsigned count = num_instances / instance_div;
         if (count * instance_div != num_instances)
            count++;

         offset += stride * start_instance;
         size = stride * (count - 1) + element_size;
      } else {
         /* Per-vertex attrib. */
         offset += stride * start_vertex;
         size = stride * (num_vertices - 1) + element_size;
      }

      const void *ptr = vao->Attrib[binding_index].Pointer;
      _mesa_glthread_upload(ctx, (uint8_t*)ptr + offset,
                            size, &upload_offset, &upload_buffer, NULL);
      assert(upload_buffer);

      buffers[num_buffers].buffer = upload_buffer;
      buffers[num_buffers].offset = upload_offset - offset;
      buffers[num_buffers].original_pointer = ptr;
      num_buffers++;
   }

   return true;
}

/* Generic DrawArrays structure NOT supporting user buffers. Ignore the name. */
struct marshal_cmd_DrawArrays
{
   struct marshal_cmd_base cmd_base;
   GLenum mode;
   GLint first;
   GLsizei count;
   GLsizei instance_count;
   GLuint baseinstance;
};

uint32_t
_mesa_unmarshal_DrawArrays(struct gl_context *ctx,
                           const struct marshal_cmd_DrawArrays *cmd,
                           const uint64_t *last)
{
   /* Ignore the function name. We use DISPATCH_CMD_DrawArrays
    * for all DrawArrays variants without user buffers, and
    * DISPATCH_CMD_DrawArraysInstancedBaseInstance for all DrawArrays
    * variants with user buffers.
    */
   const GLenum mode = cmd->mode;
   const GLint first = cmd->first;
   const GLsizei count = cmd->count;
   const GLsizei instance_count = cmd->instance_count;
   const GLuint baseinstance = cmd->baseinstance;

   CALL_DrawArraysInstancedBaseInstance(ctx->CurrentServerDispatch,
                                        (mode, first, count, instance_count,
                                         baseinstance));
   return cmd->cmd_base.cmd_size;
}

static ALWAYS_INLINE void
draw_arrays_async(struct gl_context *ctx, GLenum mode, GLint first,
                  GLsizei count, GLsizei instance_count, GLuint baseinstance)
{
   int cmd_size = sizeof(struct marshal_cmd_DrawArrays);
   struct marshal_cmd_DrawArrays *cmd =
      _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawArrays, cmd_size);

   cmd->mode = mode;
   cmd->first = first;
   cmd->count = count;
   cmd->instance_count = instance_count;
   cmd->baseinstance = baseinstance;
}

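/* Commands that carry user buffers are variable-length: an array of
 * util_bitcount(user_buffer_mask) glthread_attrib_binding entries is stored
 * directly after the command structure (written with memcpy(cmd + 1, ...) in
 * the *_async_user functions and read back via (cmd + 1) when unmarshalling).
 */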
/* Generic DrawArrays structure supporting user buffers. Ignore the name. */
struct marshal_cmd_DrawArraysInstancedBaseInstance
{
   struct marshal_cmd_base cmd_base;
   GLenum mode;
   GLint first;
   GLsizei count;
   GLsizei instance_count;
   GLuint baseinstance;
   GLuint user_buffer_mask;
};

uint32_t
_mesa_unmarshal_DrawArraysInstancedBaseInstance(struct gl_context *ctx,
                                                const struct marshal_cmd_DrawArraysInstancedBaseInstance *cmd,
                                                const uint64_t *last)
{
   /* Ignore the function name. We use DISPATCH_CMD_DrawArrays
    * for all DrawArrays variants without user buffers, and
    * DISPATCH_CMD_DrawArraysInstancedBaseInstance for all DrawArrays
    * variants with user buffers.
    */
   const GLenum mode = cmd->mode;
   const GLint first = cmd->first;
   const GLsizei count = cmd->count;
   const GLsizei instance_count = cmd->instance_count;
   const GLuint baseinstance = cmd->baseinstance;
   const GLuint user_buffer_mask = cmd->user_buffer_mask;
   const struct glthread_attrib_binding *buffers =
      (const struct glthread_attrib_binding *)(cmd + 1);

   /* Bind uploaded buffers if needed. */
   if (user_buffer_mask) {
      _mesa_InternalBindVertexBuffers(ctx, buffers, user_buffer_mask,
                                      false);
   }

   CALL_DrawArraysInstancedBaseInstance(ctx->CurrentServerDispatch,
                                        (mode, first, count, instance_count,
                                         baseinstance));

   /* Restore states. */
   if (user_buffer_mask) {
      _mesa_InternalBindVertexBuffers(ctx, buffers, user_buffer_mask,
                                      true);
   }
   return cmd->cmd_base.cmd_size;
}

static ALWAYS_INLINE void
draw_arrays_async_user(struct gl_context *ctx, GLenum mode, GLint first,
                       GLsizei count, GLsizei instance_count, GLuint baseinstance,
                       unsigned user_buffer_mask,
                       const struct glthread_attrib_binding *buffers)
{
   int buffers_size = util_bitcount(user_buffer_mask) * sizeof(buffers[0]);
   int cmd_size = sizeof(struct marshal_cmd_DrawArraysInstancedBaseInstance) +
                  buffers_size;
   struct marshal_cmd_DrawArraysInstancedBaseInstance *cmd;

   cmd = _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawArraysInstancedBaseInstance,
                                         cmd_size);
   cmd->mode = mode;
   cmd->first = first;
   cmd->count = count;
   cmd->instance_count = instance_count;
   cmd->baseinstance = baseinstance;
   cmd->user_buffer_mask = user_buffer_mask;

   if (user_buffer_mask)
      memcpy(cmd + 1, buffers, buffers_size);
}

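/* Marshal a non-indexed draw. User vertex arrays are uploaded when possible;
 * otherwise glthread is synchronized and the driver is called directly.
 */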
static ALWAYS_INLINE void
draw_arrays(GLenum mode, GLint first, GLsizei count, GLsizei instance_count,
            GLuint baseinstance, bool compiled_into_dlist)
{
   GET_CURRENT_CONTEXT(ctx);

   struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
   unsigned user_buffer_mask = vao->UserPointerMask & vao->BufferEnabled;

   if (compiled_into_dlist && ctx->GLThread.ListMode) {
      _mesa_glthread_finish_before(ctx, "DrawArrays");
      /* Use the function that's compiled into a display list. */
      CALL_DrawArrays(ctx->CurrentServerDispatch, (mode, first, count));
      return;
   }

   /* Fast path when nothing needs to be done.
    *
    * This is also an error path. Zero counts should still call the driver
    * for possible GL errors.
    */
   if (ctx->API == API_OPENGL_CORE || !user_buffer_mask ||
       count <= 0 || instance_count <= 0) {
      draw_arrays_async(ctx, mode, first, count, instance_count, baseinstance);
      return;
   }

   /* Upload and draw. */
   struct glthread_attrib_binding buffers[VERT_ATTRIB_MAX];
   if (!ctx->GLThread.SupportsNonVBOUploads ||
       !upload_vertices(ctx, user_buffer_mask, first, count, baseinstance,
                        instance_count, buffers)) {
      _mesa_glthread_finish_before(ctx, "DrawArrays");
      CALL_DrawArraysInstancedBaseInstance(ctx->CurrentServerDispatch,
                                           (mode, first, count, instance_count,
                                            baseinstance));
      return;
   }

   draw_arrays_async_user(ctx, mode, first, count, instance_count, baseinstance,
                          user_buffer_mask, buffers);
}

struct marshal_cmd_MultiDrawArrays
{
   struct marshal_cmd_base cmd_base;
   GLenum mode;
   GLsizei draw_count;
   GLuint user_buffer_mask;
};

uint32_t
_mesa_unmarshal_MultiDrawArrays(struct gl_context *ctx,
                                const struct marshal_cmd_MultiDrawArrays *cmd,
                                const uint64_t *last)
{
   const GLenum mode = cmd->mode;
   const GLsizei draw_count = cmd->draw_count;
   const GLuint user_buffer_mask = cmd->user_buffer_mask;

   const char *variable_data = (const char *)(cmd + 1);
   const GLint *first = (GLint *)variable_data;
   variable_data += sizeof(GLint) * draw_count;
   const GLsizei *count = (GLsizei *)variable_data;
   variable_data += sizeof(GLsizei) * draw_count;
   const struct glthread_attrib_binding *buffers =
      (const struct glthread_attrib_binding *)variable_data;

   /* Bind uploaded buffers if needed. */
   if (user_buffer_mask) {
      _mesa_InternalBindVertexBuffers(ctx, buffers, user_buffer_mask,
                                      false);
   }

   CALL_MultiDrawArrays(ctx->CurrentServerDispatch,
                        (mode, first, count, draw_count));

   /* Restore states. */
   if (user_buffer_mask) {
      _mesa_InternalBindVertexBuffers(ctx, buffers, user_buffer_mask,
                                      true);
   }
   return cmd->cmd_base.cmd_size;
}

static ALWAYS_INLINE void
multi_draw_arrays_async(struct gl_context *ctx, GLenum mode,
                        const GLint *first, const GLsizei *count,
                        GLsizei draw_count, unsigned user_buffer_mask,
                        const struct glthread_attrib_binding *buffers)
{
   int first_size = sizeof(GLint) * draw_count;
   int count_size = sizeof(GLsizei) * draw_count;
   int buffers_size = util_bitcount(user_buffer_mask) * sizeof(buffers[0]);
   int cmd_size = sizeof(struct marshal_cmd_MultiDrawArrays) +
                  first_size + count_size + buffers_size;
   struct marshal_cmd_MultiDrawArrays *cmd;

   cmd = _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_MultiDrawArrays,
                                         cmd_size);
   cmd->mode = mode;
   cmd->draw_count = draw_count;
   cmd->user_buffer_mask = user_buffer_mask;

   char *variable_data = (char*)(cmd + 1);
   memcpy(variable_data, first, first_size);
   variable_data += first_size;
   memcpy(variable_data, count, count_size);

   if (user_buffer_mask) {
      variable_data += count_size;
      memcpy(variable_data, buffers, buffers_size);
   }
}

void GLAPIENTRY
_mesa_marshal_MultiDrawArrays(GLenum mode, const GLint *first,
                              const GLsizei *count, GLsizei draw_count)
{
   GET_CURRENT_CONTEXT(ctx);

   struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
   unsigned user_buffer_mask = vao->UserPointerMask & vao->BufferEnabled;

   if (ctx->GLThread.ListMode)
      goto sync;

   if (draw_count >= 0 &&
       (ctx->API == API_OPENGL_CORE || !user_buffer_mask)) {
      multi_draw_arrays_async(ctx, mode, first, count, draw_count, 0, NULL);
      return;
   }

   /* If the draw count is too high or negative, the queue can't be used. */
   if (!ctx->GLThread.SupportsNonVBOUploads ||
       draw_count < 0 || draw_count > MARSHAL_MAX_CMD_SIZE / 16)
      goto sync;

   unsigned min_index = ~0;
   unsigned max_index_exclusive = 0;

   for (unsigned i = 0; i < draw_count; i++) {
      GLsizei vertex_count = count[i];

      if (vertex_count < 0) {
         /* Just call the driver to set the error. */
         multi_draw_arrays_async(ctx, mode, first, count, draw_count, 0, NULL);
         return;
      }
      if (vertex_count == 0)
         continue;

      min_index = MIN2(min_index, first[i]);
      max_index_exclusive = MAX2(max_index_exclusive, first[i] + vertex_count);
   }

   unsigned num_vertices = max_index_exclusive - min_index;
   if (num_vertices == 0) {
      /* Nothing to do, but call the driver to set possible GL errors. */
      multi_draw_arrays_async(ctx, mode, first, count, draw_count, 0, NULL);
      return;
   }

   /* Upload and draw. */
   struct glthread_attrib_binding buffers[VERT_ATTRIB_MAX];
   if (!upload_vertices(ctx, user_buffer_mask, min_index, num_vertices,
                        0, 1, buffers))
      goto sync;

   multi_draw_arrays_async(ctx, mode, first, count, draw_count,
                           user_buffer_mask, buffers);
   return;

sync:
   _mesa_glthread_finish_before(ctx, "MultiDrawArrays");
   CALL_MultiDrawArrays(ctx->CurrentServerDispatch,
                        (mode, first, count, draw_count));
}

/* DrawElementsInstancedBaseVertexBaseInstance not supporting user buffers.
 * Ignore the name.
 */
struct marshal_cmd_DrawElementsInstancedARB
{
   struct marshal_cmd_base cmd_base;
   GLenum mode;
   GLenum type;
   GLsizei count;
   GLsizei instance_count;
   GLint basevertex;
   GLuint baseinstance;
   const GLvoid *indices;
};

uint32_t
_mesa_unmarshal_DrawElementsInstancedARB(struct gl_context *ctx,
                                         const struct marshal_cmd_DrawElementsInstancedARB *cmd,
                                         const uint64_t *last)
{
   /* Ignore the function name. We use DISPATCH_CMD_DrawElementsInstanced-
    * BaseVertexBaseInstance for all DrawElements variants with user buffers,
    * and both DISPATCH_CMD_DrawElementsInstancedARB and DISPATCH_CMD_Draw-
    * RangeElementsBaseVertex for all draw elements variants without user
    * buffers.
    */
   const GLenum mode = cmd->mode;
   const GLsizei count = cmd->count;
   const GLenum type = cmd->type;
   const GLvoid *indices = cmd->indices;
   const GLsizei instance_count = cmd->instance_count;
   const GLint basevertex = cmd->basevertex;
   const GLuint baseinstance = cmd->baseinstance;

   CALL_DrawElementsInstancedBaseVertexBaseInstance(ctx->CurrentServerDispatch,
                                                    (mode, count, type, indices,
                                                     instance_count, basevertex,
                                                     baseinstance));
   return cmd->cmd_base.cmd_size;
}

struct marshal_cmd_DrawRangeElementsBaseVertex
{
   struct marshal_cmd_base cmd_base;
   GLenum mode;
   GLenum type;
   GLsizei count;
   GLint basevertex;
   GLuint min_index;
   GLuint max_index;
   const GLvoid *indices;
};

uint32_t
_mesa_unmarshal_DrawRangeElementsBaseVertex(struct gl_context *ctx,
                                            const struct marshal_cmd_DrawRangeElementsBaseVertex *cmd,
                                            const uint64_t *last)
{
   const GLenum mode = cmd->mode;
   const GLsizei count = cmd->count;
   const GLenum type = cmd->type;
   const GLvoid *indices = cmd->indices;
   const GLint basevertex = cmd->basevertex;
   const GLuint min_index = cmd->min_index;
   const GLuint max_index = cmd->max_index;

   CALL_DrawRangeElementsBaseVertex(ctx->CurrentServerDispatch,
                                    (mode, min_index, max_index, count,
                                     type, indices, basevertex));
   return cmd->cmd_base.cmd_size;
}

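/* Queue a DrawElements variant that doesn't need buffer uploads. When the
 * index bounds are valid, the draw is encoded as DrawRangeElementsBaseVertex
 * (which carries no instancing parameters); otherwise it is encoded as
 * DrawElementsInstancedARB.
 */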
static ALWAYS_INLINE void
draw_elements_async(struct gl_context *ctx, GLenum mode, GLsizei count,
                    GLenum type, const GLvoid *indices, GLsizei instance_count,
                    GLint basevertex, GLuint baseinstance,
                    bool index_bounds_valid, GLuint min_index, GLuint max_index)
{
   if (index_bounds_valid) {
      int cmd_size = sizeof(struct marshal_cmd_DrawRangeElementsBaseVertex);
      struct marshal_cmd_DrawRangeElementsBaseVertex *cmd =
         _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawRangeElementsBaseVertex, cmd_size);

      cmd->mode = mode;
      cmd->count = count;
      cmd->type = type;
      cmd->indices = indices;
      cmd->basevertex = basevertex;
      cmd->min_index = min_index;
      cmd->max_index = max_index;
   } else {
      int cmd_size = sizeof(struct marshal_cmd_DrawElementsInstancedARB);
      struct marshal_cmd_DrawElementsInstancedARB *cmd =
         _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawElementsInstancedARB, cmd_size);

      cmd->mode = mode;
      cmd->count = count;
      cmd->type = type;
      cmd->indices = indices;
      cmd->instance_count = instance_count;
      cmd->basevertex = basevertex;
      cmd->baseinstance = baseinstance;
   }
}

struct marshal_cmd_DrawElementsInstancedBaseVertexBaseInstance
{
   struct marshal_cmd_base cmd_base;
   bool index_bounds_valid;
   GLenum mode;
   GLenum type;
   GLsizei count;
   GLsizei instance_count;
   GLint basevertex;
   GLuint baseinstance;
   GLuint min_index;
   GLuint max_index;
   GLuint user_buffer_mask;
   const GLvoid *indices;
   struct gl_buffer_object *index_buffer;
};

uint32_t
_mesa_unmarshal_DrawElementsInstancedBaseVertexBaseInstance(struct gl_context *ctx,
                                                            const struct marshal_cmd_DrawElementsInstancedBaseVertexBaseInstance *cmd,
                                                            const uint64_t *last)
{
   /* Ignore the function name. We use DISPATCH_CMD_DrawElementsInstanced-
    * BaseVertexBaseInstance for all DrawElements variants with user buffers,
    * and both DISPATCH_CMD_DrawElementsInstancedARB and DISPATCH_CMD_Draw-
    * RangeElementsBaseVertex for all draw elements variants without user
    * buffers.
    */
   const GLenum mode = cmd->mode;
   const GLsizei count = cmd->count;
   const GLenum type = cmd->type;
   const GLvoid *indices = cmd->indices;
   const GLsizei instance_count = cmd->instance_count;
   const GLint basevertex = cmd->basevertex;
   const GLuint baseinstance = cmd->baseinstance;
   const GLuint min_index = cmd->min_index;
   const GLuint max_index = cmd->max_index;
   const GLuint user_buffer_mask = cmd->user_buffer_mask;
   struct gl_buffer_object *index_buffer = cmd->index_buffer;
   const struct glthread_attrib_binding *buffers =
      (const struct glthread_attrib_binding *)(cmd + 1);

   /* Bind uploaded buffers if needed. */
   if (user_buffer_mask) {
      _mesa_InternalBindVertexBuffers(ctx, buffers, user_buffer_mask,
                                      false);
   }
   if (index_buffer) {
      _mesa_InternalBindElementBuffer(ctx, index_buffer);
   }

   /* Draw. */
   if (cmd->index_bounds_valid && instance_count == 1 && baseinstance == 0) {
      CALL_DrawRangeElementsBaseVertex(ctx->CurrentServerDispatch,
                                       (mode, min_index, max_index, count,
                                        type, indices, basevertex));
   } else {
      CALL_DrawElementsInstancedBaseVertexBaseInstance(ctx->CurrentServerDispatch,
                                                       (mode, count, type, indices,
                                                        instance_count, basevertex,
                                                        baseinstance));
   }

   /* Restore states. */
   if (index_buffer) {
      _mesa_InternalBindElementBuffer(ctx, NULL);
   }
   if (user_buffer_mask) {
      _mesa_InternalBindVertexBuffers(ctx, buffers, user_buffer_mask,
                                      true);
   }
   return cmd->cmd_base.cmd_size;
}

static ALWAYS_INLINE void
draw_elements_async_user(struct gl_context *ctx, GLenum mode, GLsizei count,
                         GLenum type, const GLvoid *indices, GLsizei instance_count,
                         GLint basevertex, GLuint baseinstance,
                         bool index_bounds_valid, GLuint min_index, GLuint max_index,
                         struct gl_buffer_object *index_buffer,
                         unsigned user_buffer_mask,
                         const struct glthread_attrib_binding *buffers)
{
   int buffers_size = util_bitcount(user_buffer_mask) * sizeof(buffers[0]);
   int cmd_size = sizeof(struct marshal_cmd_DrawElementsInstancedBaseVertexBaseInstance) +
                  buffers_size;
   struct marshal_cmd_DrawElementsInstancedBaseVertexBaseInstance *cmd;

   cmd = _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_DrawElementsInstancedBaseVertexBaseInstance, cmd_size);
   cmd->mode = mode;
   cmd->count = count;
   cmd->type = type;
   cmd->indices = indices;
   cmd->instance_count = instance_count;
   cmd->basevertex = basevertex;
   cmd->baseinstance = baseinstance;
   cmd->min_index = min_index;
   cmd->max_index = max_index;
   cmd->user_buffer_mask = user_buffer_mask;
   cmd->index_bounds_valid = index_bounds_valid;
   cmd->index_buffer = index_buffer;

   if (user_buffer_mask)
      memcpy(cmd + 1, buffers, buffers_size);
}

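/* Marshal an indexed draw. User vertex arrays and user index data are
 * uploaded when possible; otherwise glthread is synchronized and the driver
 * is called directly.
 */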
static void
draw_elements(GLenum mode, GLsizei count, GLenum type, const GLvoid *indices,
              GLsizei instance_count, GLint basevertex, GLuint baseinstance,
              bool index_bounds_valid, GLuint min_index, GLuint max_index,
              bool compiled_into_dlist)
{
   GET_CURRENT_CONTEXT(ctx);

   struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
   unsigned user_buffer_mask = vao->UserPointerMask & vao->BufferEnabled;
   bool has_user_indices = vao->CurrentElementBufferName == 0;

   if (compiled_into_dlist && ctx->GLThread.ListMode)
      goto sync;

   /* Fast path when nothing needs to be done.
    *
    * This is also an error path. Zero counts should still call the driver
    * for possible GL errors.
    */
   if (ctx->API == API_OPENGL_CORE ||
       count <= 0 || instance_count <= 0 || max_index < min_index ||
       !is_index_type_valid(type) ||
       (!user_buffer_mask && !has_user_indices)) {
      draw_elements_async(ctx, mode, count, type, indices, instance_count,
                          basevertex, baseinstance, index_bounds_valid,
                          min_index, max_index);
      return;
   }

   if (!ctx->GLThread.SupportsNonVBOUploads)
      goto sync;

   bool need_index_bounds = user_buffer_mask & ~vao->NonZeroDivisorMask;
   unsigned index_size = get_index_size(type);

   if (need_index_bounds && !index_bounds_valid) {
      /* Sync if indices come from a buffer and vertices come from memory
       * and index bounds are not valid.
       *
       * We would have to map the indices to compute the index bounds, and
       * for that we would have to sync anyway.
       */
      if (!has_user_indices)
         goto sync;

      /* Compute the index bounds. */
      min_index = ~0;
      max_index = 0;
      vbo_get_minmax_index_mapped(count, index_size,
                                  ctx->GLThread._RestartIndex[index_size - 1],
                                  ctx->GLThread._PrimitiveRestart, indices,
                                  &min_index, &max_index);
      index_bounds_valid = true;
   }

   unsigned start_vertex = min_index + basevertex;
   unsigned num_vertices = max_index + 1 - min_index;

   /* If there is too much data to upload, sync and let the driver unroll
    * indices. */
   if (util_is_vbo_upload_ratio_too_large(count, num_vertices))
      goto sync;

   struct glthread_attrib_binding buffers[VERT_ATTRIB_MAX];
   if (user_buffer_mask &&
       !upload_vertices(ctx, user_buffer_mask, start_vertex, num_vertices,
                        baseinstance, instance_count, buffers))
      goto sync;

   /* Upload indices. */
   struct gl_buffer_object *index_buffer = NULL;
   if (has_user_indices)
      index_buffer = upload_indices(ctx, count, index_size, &indices);

   /* Draw asynchronously. */
   draw_elements_async_user(ctx, mode, count, type, indices, instance_count,
                            basevertex, baseinstance, index_bounds_valid,
                            min_index, max_index, index_buffer,
                            user_buffer_mask, buffers);
   return;

sync:
   _mesa_glthread_finish_before(ctx, "DrawElements");

   if (compiled_into_dlist && ctx->GLThread.ListMode) {
      /* Only use the ones that are compiled into display lists. */
      if (basevertex) {
         CALL_DrawElementsBaseVertex(ctx->CurrentServerDispatch,
                                     (mode, count, type, indices, basevertex));
      } else if (index_bounds_valid) {
         CALL_DrawRangeElements(ctx->CurrentServerDispatch,
                                (mode, min_index, max_index, count, type, indices));
      } else {
         CALL_DrawElements(ctx->CurrentServerDispatch, (mode, count, type, indices));
      }
   } else if (index_bounds_valid && instance_count == 1 && baseinstance == 0) {
      CALL_DrawRangeElementsBaseVertex(ctx->CurrentServerDispatch,
                                       (mode, min_index, max_index, count,
                                        type, indices, basevertex));
   } else {
      CALL_DrawElementsInstancedBaseVertexBaseInstance(ctx->CurrentServerDispatch,
                                                       (mode, count, type, indices,
                                                        instance_count, basevertex,
                                                        baseinstance));
   }
}

struct marshal_cmd_MultiDrawElementsBaseVertex
{
   struct marshal_cmd_base cmd_base;
   bool has_base_vertex;
   GLenum mode;
   GLenum type;
   GLsizei draw_count;
   GLuint user_buffer_mask;
   struct gl_buffer_object *index_buffer;
};

uint32_t
_mesa_unmarshal_MultiDrawElementsBaseVertex(struct gl_context *ctx,
                                            const struct marshal_cmd_MultiDrawElementsBaseVertex *cmd,
                                            const uint64_t *last)
{
   const GLenum mode = cmd->mode;
   const GLenum type = cmd->type;
   const GLsizei draw_count = cmd->draw_count;
   const GLuint user_buffer_mask = cmd->user_buffer_mask;
   struct gl_buffer_object *index_buffer = cmd->index_buffer;
   const bool has_base_vertex = cmd->has_base_vertex;

   const char *variable_data = (const char *)(cmd + 1);
   const GLsizei *count = (GLsizei *)variable_data;
   variable_data += sizeof(GLsizei) * draw_count;
   const GLvoid *const *indices = (const GLvoid *const *)variable_data;
   variable_data += sizeof(const GLvoid *const *) * draw_count;
   const GLsizei *basevertex = NULL;
   if (has_base_vertex) {
      basevertex = (GLsizei *)variable_data;
      variable_data += sizeof(GLsizei) * draw_count;
   }
   const struct glthread_attrib_binding *buffers =
      (const struct glthread_attrib_binding *)variable_data;

   /* Bind uploaded buffers if needed. */
   if (user_buffer_mask) {
      _mesa_InternalBindVertexBuffers(ctx, buffers, user_buffer_mask,
                                      false);
   }
   if (index_buffer) {
      _mesa_InternalBindElementBuffer(ctx, index_buffer);
   }

   /* Draw. */
   if (has_base_vertex) {
      CALL_MultiDrawElementsBaseVertex(ctx->CurrentServerDispatch,
                                       (mode, count, type, indices, draw_count,
                                        basevertex));
   } else {
      CALL_MultiDrawElementsEXT(ctx->CurrentServerDispatch,
                                (mode, count, type, indices, draw_count));
   }

   /* Restore states. */
   if (index_buffer) {
      _mesa_InternalBindElementBuffer(ctx, NULL);
   }
   if (user_buffer_mask) {
      _mesa_InternalBindVertexBuffers(ctx, buffers, user_buffer_mask,
                                      true);
   }
   return cmd->cmd_base.cmd_size;
}

static ALWAYS_INLINE void
multi_draw_elements_async(struct gl_context *ctx, GLenum mode,
                          const GLsizei *count, GLenum type,
                          const GLvoid *const *indices, GLsizei draw_count,
                          const GLsizei *basevertex,
                          struct gl_buffer_object *index_buffer,
                          unsigned user_buffer_mask,
                          const struct glthread_attrib_binding *buffers)
{
   int count_size = sizeof(GLsizei) * draw_count;
   int indices_size = sizeof(indices[0]) * draw_count;
   int basevertex_size = basevertex ? sizeof(GLsizei) * draw_count : 0;
   int buffers_size = util_bitcount(user_buffer_mask) * sizeof(buffers[0]);
   int cmd_size = sizeof(struct marshal_cmd_MultiDrawElementsBaseVertex) +
                  count_size + indices_size + basevertex_size + buffers_size;
   struct marshal_cmd_MultiDrawElementsBaseVertex *cmd;

   cmd = _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_MultiDrawElementsBaseVertex, cmd_size);
   cmd->mode = mode;
   cmd->type = type;
   cmd->draw_count = draw_count;
   cmd->user_buffer_mask = user_buffer_mask;
   cmd->index_buffer = index_buffer;
   cmd->has_base_vertex = basevertex != NULL;

   char *variable_data = (char*)(cmd + 1);
   memcpy(variable_data, count, count_size);
   variable_data += count_size;
   memcpy(variable_data, indices, indices_size);
   variable_data += indices_size;

   if (basevertex) {
      memcpy(variable_data, basevertex, basevertex_size);
      variable_data += basevertex_size;
   }

   if (user_buffer_mask)
      memcpy(variable_data, buffers, buffers_size);
}

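/* Both MultiDrawElements and MultiDrawElementsBaseVertex are marshalled
 * through this entry point; basevertex == NULL means there are no per-draw
 * base vertices.
 */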
void GLAPIENTRY
_mesa_marshal_MultiDrawElementsBaseVertex(GLenum mode, const GLsizei *count,
                                          GLenum type,
                                          const GLvoid *const *indices,
                                          GLsizei draw_count,
                                          const GLsizei *basevertex)
{
   GET_CURRENT_CONTEXT(ctx);

   struct glthread_vao *vao = ctx->GLThread.CurrentVAO;
   unsigned user_buffer_mask = vao->UserPointerMask & vao->BufferEnabled;
   bool has_user_indices = vao->CurrentElementBufferName == 0;

   if (ctx->GLThread.ListMode)
      goto sync;

   /* Fast path when nothing needs to be done. */
   if (draw_count >= 0 &&
       (ctx->API == API_OPENGL_CORE ||
        !is_index_type_valid(type) ||
        (!user_buffer_mask && !has_user_indices))) {
      multi_draw_elements_async(ctx, mode, count, type, indices, draw_count,
                                basevertex, 0, 0, NULL);
      return;
   }

   bool need_index_bounds = user_buffer_mask & ~vao->NonZeroDivisorMask;

   /* If the draw count is too high or negative, the queue can't be used.
    *
    * Sync if indices come from a buffer and vertices come from memory
    * and index bounds are not valid. We would have to map the indices
    * to compute the index bounds, and for that we would have to sync anyway.
    */
   if (!ctx->GLThread.SupportsNonVBOUploads ||
       draw_count < 0 || draw_count > MARSHAL_MAX_CMD_SIZE / 32 ||
       (need_index_bounds && !has_user_indices))
      goto sync;

   unsigned index_size = get_index_size(type);
   unsigned min_index = ~0;
   unsigned max_index = 0;
   unsigned total_count = 0;
   unsigned num_vertices = 0;

   /* This is always true if there is per-vertex data that needs to be
    * uploaded.
    */
   if (need_index_bounds) {
      /* Compute the index bounds. */
      for (unsigned i = 0; i < draw_count; i++) {
         GLsizei vertex_count = count[i];

         if (vertex_count < 0) {
            /* Just call the driver to set the error. */
            multi_draw_elements_async(ctx, mode, count, type, indices, draw_count,
                                      basevertex, 0, 0, NULL);
            return;
         }
         if (vertex_count == 0)
            continue;

         unsigned min = ~0, max = 0;
         vbo_get_minmax_index_mapped(vertex_count, index_size,
                                     ctx->GLThread._RestartIndex[index_size - 1],
                                     ctx->GLThread._PrimitiveRestart, indices[i],
                                     &min, &max);
         if (basevertex) {
            min += basevertex[i];
            max += basevertex[i];
         }
         min_index = MIN2(min_index, min);
         max_index = MAX2(max_index, max);
         total_count += vertex_count;
      }

      num_vertices = max_index + 1 - min_index;

      if (total_count == 0 || num_vertices == 0) {
         /* Nothing to do, but call the driver to set possible GL errors. */
         multi_draw_elements_async(ctx, mode, count, type, indices, draw_count,
                                   basevertex, 0, 0, NULL);
         return;
      }

      /* If there is too much data to upload, sync and let the driver unroll
       * indices. */
      if (util_is_vbo_upload_ratio_too_large(total_count, num_vertices))
         goto sync;
   } else if (has_user_indices) {
      /* Only compute total_count for the upload of indices. */
      for (unsigned i = 0; i < draw_count; i++) {
         GLsizei vertex_count = count[i];

         if (vertex_count < 0) {
            /* Just call the driver to set the error. */
            multi_draw_elements_async(ctx, mode, count, type, indices, draw_count,
                                      basevertex, 0, 0, NULL);
            return;
         }
         if (vertex_count == 0)
            continue;

         total_count += vertex_count;
      }

      if (total_count == 0) {
         /* Nothing to do, but call the driver to set possible GL errors. */
         multi_draw_elements_async(ctx, mode, count, type, indices, draw_count,
                                   basevertex, 0, 0, NULL);
         return;
      }
   }

   /* Upload vertices. */
   struct glthread_attrib_binding buffers[VERT_ATTRIB_MAX];
   if (user_buffer_mask &&
       !upload_vertices(ctx, user_buffer_mask, min_index, num_vertices,
                        0, 1, buffers))
      goto sync;

   /* Upload indices. */
   struct gl_buffer_object *index_buffer = NULL;
   if (has_user_indices) {
      const GLvoid **out_indices = alloca(sizeof(indices[0]) * draw_count);

      index_buffer = upload_multi_indices(ctx, total_count, index_size,
                                          draw_count, count, indices,
                                          out_indices);
      indices = out_indices;
   }

   /* Draw asynchronously. */
   multi_draw_elements_async(ctx, mode, count, type, indices, draw_count,
                             basevertex, index_buffer, user_buffer_mask,
                             buffers);
   return;

sync:
   _mesa_glthread_finish_before(ctx, "DrawElements");

   if (basevertex) {
      CALL_MultiDrawElementsBaseVertex(ctx->CurrentServerDispatch,
                                       (mode, count, type, indices, draw_count,
                                        basevertex));
   } else {
      CALL_MultiDrawElementsEXT(ctx->CurrentServerDispatch,
                                (mode, count, type, indices, draw_count));
   }
}

void GLAPIENTRY
_mesa_marshal_DrawArrays(GLenum mode, GLint first, GLsizei count)
{
   draw_arrays(mode, first, count, 1, 0, true);
}

void GLAPIENTRY
_mesa_marshal_DrawArraysInstancedARB(GLenum mode, GLint first, GLsizei count,
                                     GLsizei instance_count)
{
   draw_arrays(mode, first, count, instance_count, 0, false);
}

void GLAPIENTRY
_mesa_marshal_DrawArraysInstancedBaseInstance(GLenum mode, GLint first,
                                              GLsizei count, GLsizei instance_count,
                                              GLuint baseinstance)
{
   draw_arrays(mode, first, count, instance_count, baseinstance, false);
}

void GLAPIENTRY
_mesa_marshal_DrawElements(GLenum mode, GLsizei count, GLenum type,
                           const GLvoid *indices)
{
   draw_elements(mode, count, type, indices, 1, 0, 0, false, 0, 0, true);
}

void GLAPIENTRY
_mesa_marshal_DrawRangeElements(GLenum mode, GLuint start, GLuint end,
                                GLsizei count, GLenum type,
                                const GLvoid *indices)
{
   draw_elements(mode, count, type, indices, 1, 0, 0, true, start, end, true);
}

void GLAPIENTRY
_mesa_marshal_DrawElementsInstancedARB(GLenum mode, GLsizei count, GLenum type,
                                       const GLvoid *indices, GLsizei instance_count)
{
   draw_elements(mode, count, type, indices, instance_count, 0, 0, false, 0, 0, false);
}

void GLAPIENTRY
_mesa_marshal_DrawElementsBaseVertex(GLenum mode, GLsizei count, GLenum type,
                                     const GLvoid *indices, GLint basevertex)
{
   draw_elements(mode, count, type, indices, 1, basevertex, 0, false, 0, 0, true);
}

void GLAPIENTRY
_mesa_marshal_DrawRangeElementsBaseVertex(GLenum mode, GLuint start, GLuint end,
                                          GLsizei count, GLenum type,
                                          const GLvoid *indices, GLint basevertex)
{
   draw_elements(mode, count, type, indices, 1, basevertex, 0, true, start, end, false);
}

void GLAPIENTRY
_mesa_marshal_DrawElementsInstancedBaseVertex(GLenum mode, GLsizei count,
                                              GLenum type, const GLvoid *indices,
                                              GLsizei instance_count, GLint basevertex)
{
   draw_elements(mode, count, type, indices, instance_count, basevertex, 0, false, 0, 0, false);
}

void GLAPIENTRY
_mesa_marshal_DrawElementsInstancedBaseInstance(GLenum mode, GLsizei count,
                                                GLenum type, const GLvoid *indices,
                                                GLsizei instance_count, GLuint baseinstance)
{
   draw_elements(mode, count, type, indices, instance_count, 0, baseinstance, false, 0, 0, false);
}

void GLAPIENTRY
_mesa_marshal_DrawElementsInstancedBaseVertexBaseInstance(GLenum mode, GLsizei count,
                                                          GLenum type, const GLvoid *indices,
                                                          GLsizei instance_count, GLint basevertex,
                                                          GLuint baseinstance)
{
   draw_elements(mode, count, type, indices, instance_count, basevertex, baseinstance, false, 0, 0, false);
}

void GLAPIENTRY
_mesa_marshal_MultiDrawElementsEXT(GLenum mode, const GLsizei *count,
                                   GLenum type, const GLvoid *const *indices,
                                   GLsizei draw_count)
{
   _mesa_marshal_MultiDrawElementsBaseVertex(mode, count, type, indices,
                                             draw_count, NULL);
}

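/* These draw calls are always marshalled through the generic commands above,
 * so the per-function unmarshal callbacks below are never executed.
 */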
uint32_t
_mesa_unmarshal_DrawArraysInstancedARB(struct gl_context *ctx, const struct marshal_cmd_DrawArraysInstancedARB *cmd, const uint64_t *last)
{
   unreachable("never used - DrawArraysInstancedBaseInstance is used instead");
   return 0;
}

uint32_t
_mesa_unmarshal_DrawElements(struct gl_context *ctx, const struct marshal_cmd_DrawElements *cmd, const uint64_t *last)
{
   unreachable("never used - DrawElementsInstancedBaseVertexBaseInstance is used instead");
   return 0;
}

uint32_t
_mesa_unmarshal_DrawRangeElements(struct gl_context *ctx, const struct marshal_cmd_DrawRangeElements *cmd, const uint64_t *last)
{
   unreachable("never used - DrawElementsInstancedBaseVertexBaseInstance is used instead");
   return 0;
}

uint32_t
_mesa_unmarshal_DrawElementsBaseVertex(struct gl_context *ctx, const struct marshal_cmd_DrawElementsBaseVertex *cmd, const uint64_t *last)
{
   unreachable("never used - DrawElementsInstancedBaseVertexBaseInstance is used instead");
   return 0;
}

uint32_t
_mesa_unmarshal_DrawElementsInstancedBaseVertex(struct gl_context *ctx, const struct marshal_cmd_DrawElementsInstancedBaseVertex *cmd, const uint64_t *last)
{
   unreachable("never used - DrawElementsInstancedBaseVertexBaseInstance is used instead");
   return 0;
}

uint32_t
_mesa_unmarshal_DrawElementsInstancedBaseInstance(struct gl_context *ctx, const struct marshal_cmd_DrawElementsInstancedBaseInstance *cmd, const uint64_t *last)
{
   unreachable("never used - DrawElementsInstancedBaseVertexBaseInstance is used instead");
   return 0;
}

uint32_t
_mesa_unmarshal_MultiDrawElementsEXT(struct gl_context *ctx, const struct marshal_cmd_MultiDrawElementsEXT *cmd, const uint64_t *last)
{
   unreachable("never used - MultiDrawElementsBaseVertex is used instead");
   return 0;
}