/xsrc/external/mit/xf86-video-intel/dist/src/sna/
gen6_common.c
     39: if (kgem->nbatch) {
     49: assert(kgem->nbatch == 0);
sna_blt.c
     96: assert(sna->kgem.nbatch <= KGEM_BATCH_SIZE(&sna->kgem));
    108: assert(kgem->nbatch <= KGEM_BATCH_SIZE(kgem));
    116: uint32_t *b = kgem->batch + kgem->nbatch;
    120: kgem->nbatch += 3;
    121: assert(kgem->nbatch < kgem->surface);
    123: assert(sna->kgem.nbatch <= KGEM_BATCH_SIZE(&sna->kgem));
    193: b = kgem->batch + kgem->nbatch;
    204: kgem_add_reloc64(kgem, kgem->nbatch + 4, bo,
    213: kgem->nbatch += 10;
    223: b[4] = kgem_add_reloc(kgem, kgem->nbatch
    [all...]
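The sna_blt.c hits show the emission idiom used throughout SNA: take a pointer to the batch tail, fill in the packet dwords, record each relocation against its dword index in the batch, then advance nbatch by the packet length. A minimal sketch of that idiom, assuming a hypothetical 4-dword fill packet; the real command words and relocation domain flags are elided:

    /* Hypothetical packet illustrating the sna_blt.c idiom above;
     * only batch/nbatch/surface and kgem_add_reloc() follow the hits. */
    static void emit_fill_packet(struct kgem *kgem, struct kgem_bo *bo,
                                 uint32_t cmd, uint32_t pitch, uint32_t color)
    {
        uint32_t *b = kgem->batch + kgem->nbatch;   /* current batch tail */

        b[0] = cmd;
        b[1] = pitch;
        b[2] = color;
        /* relocations are keyed by the dword's index within the batch */
        b[3] = kgem_add_reloc(kgem, kgem->nbatch + 3, bo,
                              0 /* domain flags elided */, 0);

        kgem->nbatch += 4;                          /* step past the packet */
        assert(kgem->nbatch < kgem->surface);       /* must not reach surfaces */
    }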
sna_render_inline.h
     47: assert(sna->kgem.nbatch <= KGEM_BATCH_SIZE(&sna->kgem));
     48: assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED <= sna->kgem.surface);
     49: return sna->kgem.surface - sna->kgem.nbatch - KGEM_BATCH_RESERVED;
     55: assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED < sna->kgem.surface);
     56: sna->kgem.batch[sna->kgem.nbatch++] = dword;
     62: assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED < sna->kgem.surface);
     63: while (sna->kgem.nbatch & (align-1))
     64: sna->kgem.batch[sna->kgem.nbatch++] = 0;
     65: sna->kgem.batch[sna->kgem.nbatch++] = dword;
     71: sna->kgem.nbatch
    [all...]
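Read together, these hits reconstruct the inline helpers the rest of the driver builds on: a remaining-space query plus dword emitters, one of which zero-pads until nbatch reaches an alignment boundary. A sketch assembled from the visible lines; the names batch_space, batch_emit and batch_emit_aligned are assumptions:

    static inline int batch_space(struct sna *sna)
    {
        assert(sna->kgem.nbatch <= KGEM_BATCH_SIZE(&sna->kgem));
        assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED <= sna->kgem.surface);
        return sna->kgem.surface - sna->kgem.nbatch - KGEM_BATCH_RESERVED;
    }

    static inline void batch_emit(struct sna *sna, uint32_t dword)
    {
        assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED < sna->kgem.surface);
        sna->kgem.batch[sna->kgem.nbatch++] = dword;
    }

    static inline void batch_emit_aligned(struct sna *sna, uint32_t dword,
                                          unsigned align)
    {
        assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED < sna->kgem.surface);
        while (sna->kgem.nbatch & (align - 1))
            sna->kgem.batch[sna->kgem.nbatch++] = 0;  /* zero-pad to align */
        sna->kgem.batch[sna->kgem.nbatch++] = dword;
    }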
gen8_vertex.c
     67: assert(sna->render.vertex_offset <= sna->kgem.nbatch);
    208: size = sna->kgem.nbatch;
    214: sna->render.vertex_used, sna->kgem.nbatch));
    215: assert(sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface);
    216: memcpy(sna->kgem.batch + sna->kgem.nbatch,
    219: delta = sna->kgem.nbatch * 4;
    221: sna->kgem.nbatch += sna->render.vertex_used;
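The gen8_vertex.c hits show small vertex buffers being folded into the batch itself: the data is copied to batch + nbatch, and the relocation delta becomes a byte offset into the batch buffer. Condensed into a sketch; the wrapper function is an assumption:

    /* Copy pending vertices into the batch tail, per the hits above. */
    static uint32_t copy_vertices_into_batch(struct sna *sna)
    {
        uint32_t delta;

        assert(sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface);
        memcpy(sna->kgem.batch + sna->kgem.nbatch,
               sna->render.vertex_data,
               sna->render.vertex_used * sizeof(uint32_t));
        delta = sna->kgem.nbatch * 4;    /* dword index -> byte offset */
        sna->kgem.nbatch += sna->render.vertex_used;
        return delta;
    }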
gen2_render.c
    314: BATCH(kgem_add_reloc(&sna->kgem, sna->kgem.nbatch,
    576: sna->kgem.surface-sna->kgem.nbatch));
    630: BATCH(kgem_add_reloc(&sna->kgem, sna->kgem.nbatch,
    725: unwind = sna->kgem.nbatch;
    736: sna->kgem.nbatch = unwind;
    743: unwind = sna->kgem.nbatch;
    751: sna->kgem.nbatch = unwind;
    928: v = (float *)sna->kgem.batch + sna->kgem.nbatch;
    929: sna->kgem.nbatch += 12;
    954: v = (float *)sna->kgem.batch + sna->kgem.nbatch;
    [all...]
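The pairs at 725/736 and 743/751 are gen2's checkpoint-and-rewind trick: state is emitted speculatively, then nbatch is rolled back if the fresh dwords are byte-identical to a copy already in the batch. A sketch of the pattern; emit_ls() and the last_state bookkeeping are illustrative assumptions:

    static void gen2_emit_ls_cached(struct sna *sna, uint16_t *last_state)
    {
        uint16_t unwind = sna->kgem.nbatch;          /* checkpoint */

        emit_ls(sna);                                /* hypothetical emitter */
        if (*last_state &&
            memcmp(sna->kgem.batch + *last_state,
                   sna->kgem.batch + unwind,
                   (sna->kgem.nbatch - unwind) * sizeof(uint32_t)) == 0)
            sna->kgem.nbatch = unwind;               /* rewind: reuse old copy */
        else
            *last_state = unwind;                    /* remember the new copy */
    }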
kgem.h
    129: uint16_t nbatch;  (member in struct kgem)
    382: if (kgem->nbatch)
    440: assert(kgem->nbatch == 0);
    448: int rem = kgem->surface - kgem->nbatch;
    456: assert(kgem->nbatch < kgem->surface);
    458: return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED <= kgem->surface);
    482: return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) &&
    495: return kgem->batch + kgem->nbatch;
    852: void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch);
    854: static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)  (argument)
    [all...]
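kgem.h shows the invariant behind all of these checks: nbatch grows up from the start of the buffer while surface state is allocated down from kgem->surface, and KGEM_BATCH_RESERVED dwords stay free for the batch terminator. A sketch of the space checks the hits come from; the helper names are assumptions, and the truncated second operand of the && at line 482 is omitted:

    static inline int kgem_batch_space(struct kgem *kgem)
    {
        int rem = kgem->surface - kgem->nbatch;
        assert(kgem->nbatch < kgem->surface);
        return rem - KGEM_BATCH_RESERVED;
    }

    static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords)
    {
        return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED
                      <= kgem->surface);
    }

    static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
                                                      int num_dwords,
                                                      int num_surfaces)
    {
        /* each surface-state block takes 8 dwords off the top end */
        return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED)
               <= (int)(kgem->surface - num_surfaces * 8);
    }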
sna_accel.c
   1441: if (bo->exec == NULL && sna->kgem.nbatch && kgem_is_idle(&sna->kgem)) {
   5300: if (sna->kgem.nbatch && __kgem_ring_empty(&sna->kgem)) {
   5407: b = sna->kgem.batch + sna->kgem.nbatch;
   5420: kgem_add_reloc64(&sna->kgem, sna->kgem.nbatch + 4, bo,
   5426: kgem_add_reloc64(&sna->kgem, sna->kgem.nbatch + 6, upload,
   5433: sna->kgem.nbatch += 10;
   5435: b = sna->kgem.batch + sna->kgem.nbatch;
   5447: b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
   5452: b[5] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 5, upload,
   5459: sna->kgem.nbatch
    [all...]
sna_io.c
    509: uint32_t *b = kgem->batch + kgem->nbatch;
    526: kgem_add_reloc64(kgem, kgem->nbatch + 4, dst_bo,
    534: kgem_add_reloc64(kgem, kgem->nbatch + 8, src_bo,
    538: kgem->nbatch += 10;
    570: uint32_t *b = kgem->batch + kgem->nbatch;
    586: b[4] = kgem_add_reloc(kgem, kgem->nbatch + 4, dst_bo,
    593: b[7] = kgem_add_reloc(kgem, kgem->nbatch + 7, src_bo,
    597: kgem->nbatch += 8;
   1124: b = kgem->batch + kgem->nbatch;
   1130: kgem_add_reloc64(kgem, kgem->nbatch
    [all...]
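The sna_io.c hits pair two flavours of the same copy packet: a 10-dword form with 64-bit relocations at dwords 4 and 8, and an 8-dword form with 32-bit relocations at dwords 4 and 7. A condensed sketch of that branch; the generation test and the elided command dwords are assumptions:

    static void emit_copy_relocs(struct kgem *kgem,
                                 struct kgem_bo *dst_bo, struct kgem_bo *src_bo)
    {
        uint32_t *b = kgem->batch + kgem->nbatch;

        /* dwords 0..3 (command, pitches, rectangle) elided */
        if (kgem->gen >= 0100) {          /* 64-bit addresses; test assumed */
            *(uint64_t *)(b + 4) =
                kgem_add_reloc64(kgem, kgem->nbatch + 4, dst_bo, 0, 0);
            *(uint64_t *)(b + 8) =
                kgem_add_reloc64(kgem, kgem->nbatch + 8, src_bo, 0, 0);
            kgem->nbatch += 10;
        } else {
            b[4] = kgem_add_reloc(kgem, kgem->nbatch + 4, dst_bo, 0, 0);
            b[7] = kgem_add_reloc(kgem, kgem->nbatch + 7, src_bo, 0, 0);
            kgem->nbatch += 8;
        }
    }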
gen3_render.c
   1594: shader_offset = sna->kgem.nbatch++;
   1878: length = sna->kgem.nbatch - shader_offset;
   1943: sna->kgem.surface-sna->kgem.nbatch));
   1993: OUT_BATCH(kgem_add_reloc(&sna->kgem, sna->kgem.nbatch,
   2128: uint32_t blend_offset = sna->kgem.nbatch;
   2139: sna->kgem.nbatch = blend_offset;
   2155: state->last_constants = sna->kgem.nbatch;
   2159: memcpy(sna->kgem.batch + sna->kgem.nbatch,
   2162: sna->kgem.nbatch += count;
   2184: sna->kgem.nbatch,
    [all...]
kgem.c
   1774: if (kgem->reloc[n].offset >= sizeof(uint32_t)*kgem->nbatch)
   2513: b = kgem->batch + kgem->nbatch;
   2514: kgem->nbatch += 7;
   2531: kgem->batch[kgem->nbatch++] = MI_BATCH_BUFFER_END;
   2532: if (kgem->nbatch & 1)
   2533: kgem->batch[kgem->nbatch++] = MI_NOOP;
   2535: return kgem->nbatch;
   3787: memcpy(ptr, kgem->batch, sizeof(uint32_t)*kgem->nbatch);
   3801: 0, sizeof(uint32_t)*kgem->nbatch,
   3809: if (kgem->surface < kgem->nbatch
    [all...]
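Lines 2531-2535 are the batch terminator: MI_BATCH_BUFFER_END is appended, the batch is padded to an even dword count, and nbatch then doubles as the submitted batch length. Reconstructed from the visible lines; the enclosing function name is assumed:

    static uint32_t kgem_end_batch(struct kgem *kgem)
    {
        kgem->batch[kgem->nbatch++] = MI_BATCH_BUFFER_END;
        if (kgem->nbatch & 1)
            kgem->batch[kgem->nbatch++] = MI_NOOP;  /* pad to qword boundary */
        return kgem->nbatch;    /* batch length in dwords */
    }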
kgem_debug.c
    601: void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)  (argument)
    611: while (offset < nbatch) {
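The debug entry point walks the finished batch dword by dword, decoding one packet at a time until it reaches nbatch. A sketch around the two visible lines; decode_packet() is a hypothetical stand-in for the per-generation decoders:

    void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)
    {
        uint32_t offset = 0;

        while (offset < nbatch)
            offset += decode_packet(kgem, offset);  /* hypothetical decoder */
    }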
gen4_render.c
    283: state->last_primitive = sna->kgem.nbatch;
    603: sna->render.vertex_reloc[sna->render.nvertex_reloc++] = sna->kgem.nbatch;
    657: if (sna->kgem.nbatch == sna->render_state.gen4.last_primitive) {
    658: sna->render.vertex_offset = sna->kgem.nbatch - 5;
    667: sna->render.vertex_offset = sna->kgem.nbatch;
    675: sna->render_state.gen4.last_primitive = sna->kgem.nbatch;
    740: if (sna->kgem.nbatch == sna->render_state.gen4.last_primitive)
    741: rem = sna->kgem.nbatch - 5;
    831: while ((sna->kgem.nbatch & 15) > 12)
    860: sna->kgem.nbatch,
    [all...]
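The hits at 657-675 capture the primitive-coalescing test shared by the gen4, gen5 and gen6 backends listed here: if nothing has been emitted since the last 3DPRIMITIVE (nbatch still equals last_primitive), the open 5-dword packet is reused by pointing vertex_offset back at its vertex-count dword. As a sketch; the wrapper function is an assumption:

    static bool gen4_reopen_primitive(struct sna *sna)
    {
        if (sna->kgem.nbatch == sna->render_state.gen4.last_primitive) {
            /* reopen the still-current 3DPRIMITIVE in place */
            sna->render.vertex_offset = sna->kgem.nbatch - 5;
            return true;
        }
        sna->render.vertex_offset = sna->kgem.nbatch;   /* start a new packet */
        return false;
    }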
gen5_render.c
    272: state->last_primitive = sna->kgem.nbatch;
    589: sna->render.vertex_reloc[sna->render.nvertex_reloc++] = sna->kgem.nbatch;
    599: if (sna->kgem.nbatch == sna->render_state.gen5.last_primitive) {
    600: sna->render.vertex_offset = sna->kgem.nbatch - 5;
    609: sna->render.vertex_offset = sna->kgem.nbatch;
    617: sna->render_state.gen5.last_primitive = sna->kgem.nbatch;
    774: sna->kgem.nbatch,
    779: sna->kgem.nbatch,
    785: sna->kgem.nbatch,
    825: __FUNCTION__, sna->kgem.surface - sna->kgem.nbatch,
    [all...]
gen6_render.c
    490: sna->kgem.nbatch,
    495: sna->kgem.nbatch,
    501: sna->kgem.nbatch,
    784: OUT_BATCH(kgem_add_reloc(&sna->kgem, sna->kgem.nbatch,
   1008: state->last_primitive = sna->kgem.nbatch;
   1175: sna->render.vertex_reloc[sna->render.nvertex_reloc++] = sna->kgem.nbatch;
   1185: if (sna->kgem.nbatch == sna->render_state.gen6.last_primitive) {
   1190: sna->render.vertex_offset = sna->kgem.nbatch - 5;
   1199: sna->render.vertex_offset = sna->kgem.nbatch;
   1209: sna->render_state.gen6.last_primitive = sna->kgem.nbatch;
    [all...]
/xsrc/external/mit/xf86-video-intel-2014/dist/src/sna/
gen6_common.c
     39: if (kgem->nbatch) {
     49: assert(kgem->nbatch == 0);
sna_blt.c
     91: assert(sna->kgem.nbatch <= KGEM_BATCH_SIZE(&sna->kgem));
    103: assert(kgem->nbatch <= KGEM_BATCH_SIZE(kgem));
    111: uint32_t *b = kgem->batch + kgem->nbatch;
    115: kgem->nbatch += 3;
    116: assert(kgem->nbatch < kgem->surface);
    118: assert(sna->kgem.nbatch <= KGEM_BATCH_SIZE(&sna->kgem));
    188: b = kgem->batch + kgem->nbatch;
    199: kgem_add_reloc64(kgem, kgem->nbatch + 4, bo,
    208: kgem->nbatch += 10;
    218: b[4] = kgem_add_reloc(kgem, kgem->nbatch
    [all...]
sna_render_inline.h
     47: assert(sna->kgem.nbatch <= KGEM_BATCH_SIZE(&sna->kgem));
     48: assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED <= sna->kgem.surface);
     49: return sna->kgem.surface - sna->kgem.nbatch - KGEM_BATCH_RESERVED;
     55: assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED < sna->kgem.surface);
     56: sna->kgem.batch[sna->kgem.nbatch++] = dword;
     62: assert(sna->kgem.nbatch + 2 + KGEM_BATCH_RESERVED < sna->kgem.surface);
     63: *(uint64_t *)(sna->kgem.batch+sna->kgem.nbatch) = qword;
     64: sna->kgem.nbatch += 2;
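In this copy of the header, lines 62-64 show a qword emitter alongside the dword one: a 64-bit value is stored through the dword batch pointer and nbatch advances by two. Reconstructed from the visible lines; the name batch_emit64 is an assumption:

    static inline void batch_emit64(struct sna *sna, uint64_t qword)
    {
        assert(sna->kgem.nbatch + 2 + KGEM_BATCH_RESERVED < sna->kgem.surface);
        *(uint64_t *)(sna->kgem.batch + sna->kgem.nbatch) = qword;
        sna->kgem.nbatch += 2;
    }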
gen8_vertex.c
     67: assert(sna->render.vertex_offset <= sna->kgem.nbatch);
    208: size = sna->kgem.nbatch;
    214: sna->render.vertex_used, sna->kgem.nbatch));
    215: assert(sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface);
    216: memcpy(sna->kgem.batch + sna->kgem.nbatch,
    219: delta = sna->kgem.nbatch * 4;
    221: sna->kgem.nbatch += sna->render.vertex_used;
gen2_render.c
    311: BATCH(kgem_add_reloc(&sna->kgem, sna->kgem.nbatch,
    572: sna->kgem.surface-sna->kgem.nbatch));
    615: BATCH(kgem_add_reloc(&sna->kgem, sna->kgem.nbatch,
    706: unwind = sna->kgem.nbatch;
    717: sna->kgem.nbatch = unwind;
    724: unwind = sna->kgem.nbatch;
    732: sna->kgem.nbatch = unwind;
    909: v = (float *)sna->kgem.batch + sna->kgem.nbatch;
    910: sna->kgem.nbatch += 12;
    935: v = (float *)sna->kgem.batch + sna->kgem.nbatch;
    [all...]
kgem.h
    122: uint16_t nbatch;  (member in struct kgem)
    367: if (kgem->nbatch)
    424: assert(kgem->nbatch == 0);
    432: int rem = kgem->surface - kgem->nbatch;
    440: assert(kgem->nbatch < kgem->surface);
    442: return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED <= kgem->surface);
    466: return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) &&
    479: return kgem->batch + kgem->nbatch;
    806: void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch);
    808: static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)  (argument)
    [all...]
sna_accel.c
   1401: if (bo->exec == NULL && sna->kgem.nbatch && kgem_is_idle(&sna->kgem)) {
   5273: b = sna->kgem.batch + sna->kgem.nbatch;
   5286: kgem_add_reloc64(&sna->kgem, sna->kgem.nbatch + 4, bo,
   5292: kgem_add_reloc64(&sna->kgem, sna->kgem.nbatch + 6, upload,
   5299: sna->kgem.nbatch += 10;
   5301: b = sna->kgem.batch + sna->kgem.nbatch;
   5313: b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
   5318: b[5] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 5, upload,
   5325: sna->kgem.nbatch += 8;
   5442: b = sna->kgem.batch + sna->kgem.nbatch;
    [all...]
sna_io.c
    503: uint32_t *b = kgem->batch + kgem->nbatch;
    520: kgem_add_reloc64(kgem, kgem->nbatch + 4, dst_bo,
    528: kgem_add_reloc64(kgem, kgem->nbatch + 8, src_bo,
    532: kgem->nbatch += 10;
    563: uint32_t *b = kgem->batch + kgem->nbatch;
    579: b[4] = kgem_add_reloc(kgem, kgem->nbatch + 4, dst_bo,
    586: b[7] = kgem_add_reloc(kgem, kgem->nbatch + 7, src_bo,
    590: kgem->nbatch += 8;
   1102: b = kgem->batch + kgem->nbatch;
   1108: kgem_add_reloc64(kgem, kgem->nbatch
    [all...]
gen3_render.c
   1570: shader_offset = sna->kgem.nbatch++;
   1854: length = sna->kgem.nbatch - shader_offset;
   1919: sna->kgem.surface-sna->kgem.nbatch));
   1969: OUT_BATCH(kgem_add_reloc(&sna->kgem, sna->kgem.nbatch,
   2104: uint32_t blend_offset = sna->kgem.nbatch;
   2115: sna->kgem.nbatch = blend_offset;
   2131: state->last_constants = sna->kgem.nbatch;
   2135: memcpy(sna->kgem.batch + sna->kgem.nbatch,
   2138: sna->kgem.nbatch += count;
   2160: sna->kgem.nbatch,
    [all...]
kgem_debug.c
    601: void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)  (argument)
    611: while (offset < nbatch) {
gen4_render.c
    246: state->last_primitive = sna->kgem.nbatch;
    566: sna->render.vertex_reloc[sna->render.nvertex_reloc++] = sna->kgem.nbatch;
    620: if (sna->kgem.nbatch == sna->render_state.gen4.last_primitive) {
    621: sna->render.vertex_offset = sna->kgem.nbatch - 5;
    630: sna->render.vertex_offset = sna->kgem.nbatch;
    638: sna->render_state.gen4.last_primitive = sna->kgem.nbatch;
    703: if (sna->kgem.nbatch == sna->render_state.gen4.last_primitive)
    704: rem = sna->kgem.nbatch - 5;
    794: while ((sna->kgem.nbatch & 15) > 12)
    823: sna->kgem.nbatch,
    [all...]