/xsrc/external/mit/MesaLib/dist/src/compiler/nir/

nir_lower_ubo_vec4.c
     93  unsigned align_mul = nir_intrinsic_align_mul(intr);   (local in function nir_lower_ubo_vec4_lower)
    102  align_mul = MIN2(align_mul, 16);
    107  bool aligned_mul = (align_mul == 16 &&
    136  } else if (align_mul == 8 &&

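The lines above are the heart of the pass: UBO loads are rewritten in whole 16-byte vec4 slots, so any alignment guarantee beyond 16 bytes adds no information and is clamped away before the aligned/unaligned path is chosen. A minimal standalone C sketch of that clamp-and-test; the condition on line 107 is truncated in the listing, and the requirement of a vec4-aligned offset is an assumption here:

    #include <stdbool.h>
    #include <stdio.h>

    #define MIN2(a, b) ((a) < (b) ? (a) : (b))

    /* Sketch only, not the Mesa pass itself. */
    static bool vec4_aligned(unsigned align_mul, unsigned align_offset)
    {
       align_mul = MIN2(align_mul, 16);   /* a vec4 slot is 16 bytes */
       return align_mul == 16 && align_offset % 16 == 0;
    }

    int main(void)
    {
       printf("%d\n", vec4_aligned(32, 0));  /* 1: 32-byte align implies vec4 align */
       printf("%d\n", vec4_aligned(8, 0));   /* 0: the load may straddle a vec4 slot */
       return 0;
    }
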
nir_lower_printf.c
     68  counter->cast.align_mul = 4;
     91  fmt_str_id_deref->cast.align_mul = 4;
    122  dst_arg_deref->cast.align_mul = 4;

nir_lower_wrmasks.c
    133  unsigned align_mul = nir_intrinsic_align_mul(intr);   (local in function split_wrmask)
    137  align_off = align_off % align_mul;
    139  nir_intrinsic_set_align(new_intr, align_mul, align_off);

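split_wrmask turns one partially-masked store into several contiguous ones, and each piece needs its alignment re-derived: the piece's offset is folded back into [0, align_mul) because nir_intrinsic_set_align asserts align_offset < align_mul. A standalone sketch, with a hypothetical byte_shift for the split piece:

    #include <stdio.h>

    /* Fold a split piece's alignment back into range; sketch only. */
    static void realign(unsigned align_mul, unsigned align_offset,
                        unsigned byte_shift, unsigned *out_mul, unsigned *out_off)
    {
       *out_mul = align_mul;
       *out_off = (align_offset + byte_shift) % align_mul;
    }

    int main(void)
    {
       unsigned m, o;
       realign(16, 4, 24, &m, &o);   /* piece starting 24 bytes into the store */
       printf("align_mul=%u align_offset=%u\n", m, o);   /* 16, 12 */
       return 0;
    }
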
nir_lower_io.c
   1195  build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin, nir_ssa_def *addr, nir_address_format addr_format, nir_variable_mode modes, uint32_t align_mul, uint32_t align_offset, unsigned num_components)   (argument)
   1198  uint32_t align_mul, uint32_t align_offset,
   1208  align_mul, align_offset,
   1216  align_mul, align_offset,
   1222  align_mul, align_offset,
   1233  align_mul, align_offset,
   1240  align_mul, align_offset,
   1377  nir_intrinsic_set_align(load, align_mul, align_offset);
   1436  build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin, nir_ssa_def *addr, nir_address_format addr_format, nir_variable_mode modes, uint32_t align_mul, uint32_t align_offset, nir_ssa_def *value, nir_component_mask_t write_mask)   (argument)
   1439  uint32_t align_mul, uint32_t align_offset,
   1448  align_mul, align_offset,
   1455  align_mul, align_offset,
   1770  uint32_t align_mul, align_offset;   (local in function nir_lower_explicit_io_instr)
   1861  nir_get_explicit_deref_align(nir_deref_instr *deref, bool default_to_type_align, uint32_t *align_mul, uint32_t *align_offset)   (argument)
   [all...]

nir_opt_load_store_vectorize.c
    176  uint32_t align_mul;   (member in struct entry)
    553  uint32_t align_mul = 31;   (local in function calc_alignment)
    556  align_mul = MIN2(align_mul, ffsll(entry->key->offset_defs_mul[i]));
    559  entry->align_mul = 1u << (align_mul - 1);
    561  if (!has_align || entry->align_mul >= nir_intrinsic_align_mul(entry->intrin)) {
    562  entry->align_offset = entry->offset % entry->align_mul;
    564  entry->align_mul = nir_intrinsic_align_mul(entry->intrin);
    663  if (!ctx->options->callback(low->align_mul,
   [all...]

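calc_alignment is the most subtle use here: an offset of the form sum(def_i * mul_i) is provably aligned to the largest power of two dividing every multiplier, found from the lowest set bit of each mul_i. Starting align_mul at 31 (the maximum shift) and taking the minimum over the ffsll() results, the shift converts back with 1u << (align_mul - 1). A worked standalone version, using the GCC/Clang builtin in place of Mesa's ffsll wrapper and hypothetical multipliers:

    #include <stdio.h>

    int main(void)
    {
       unsigned long long offset_defs_mul[] = { 48, 8 };  /* hypothetical */
       unsigned align_mul = 31;                           /* max possible shift */
       for (unsigned i = 0; i < 2; i++) {
          unsigned bit = (unsigned)__builtin_ffsll((long long)offset_defs_mul[i]);
          if (bit < align_mul)
             align_mul = bit;
       }
       /* 48 = 16*3 (bit 5), 8 = 8*1 (bit 4) -> min shift 4 -> alignment 8 */
       printf("align_mul = %u\n", 1u << (align_mul - 1));
       return 0;
    }
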
nir_deref.c
    883  if (cast->cast.align_mul == 0)
    907  if (parent_mul < cast->cast.align_mul)
    910  /* If we've gotten here, we have a parent deref with an align_mul at least
    935   * it even if the align_mul from the parent is larger.
    937  assert(cast->cast.align_mul <= parent_mul);
    938  if (parent_offset % cast->cast.align_mul != cast->cast.align_offset)
    944  cast->cast.align_mul = 0;
   1049  if (cast->cast.align_mul > 0)
   1091  if (cast->cast.align_mul > 0)
   1140  parent->cast.align_mul
   [all...]

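The nir_deref.c hits implement a consistency rule rather than a computation: a cast's explicit alignment is only kept when the parent deref cannot already prove it, and it is dropped (align_mul = 0) when the parent's known offset agrees with the cast's align_offset. Sketched standalone, with a hypothetical free function standing in for the deref-walking logic:

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch of the redundancy check quoted above. */
    static bool cast_align_redundant(unsigned parent_mul, unsigned parent_offset,
                                     unsigned cast_mul, unsigned cast_offset)
    {
       if (cast_mul == 0 || parent_mul < cast_mul)
          return false;   /* nothing to drop, or parent proves less */
       /* The parent guarantees addr % parent_mul == parent_offset, so the
        * cast's weaker claim must agree modulo cast_mul. */
       return parent_offset % cast_mul == cast_offset;
    }

    int main(void)
    {
       /* parent proves 16-byte alignment at offset 8; cast asks for 8 at 0 */
       printf("%d\n", cast_align_redundant(16, 8, 8, 0));   /* 1 */
       return 0;
    }
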
nir_lower_scratch.c
     57  b, intrin->num_components, bit_size == 1 ? 32 : bit_size, offset, .align_mul=align);
     70  nir_store_scratch(b, value, offset, .align_mul=align,

nir_lower_variable_initializers.c
    186  .align_mul=chunk_size,

nir_opt_memcpy.c
     43  if (cast->cast.align_mul > 0)

/xsrc/external/mit/MesaLib/dist/src/gallium/drivers/etnaviv/tests/

lower_ubo_tests.cpp
    143  nir_load_ubo(&b, 1, 32, index, offset, .align_mul = 16, .align_offset = 0, .range_base = 0, .range = 8);
    158  nir_load_ubo(&b, 1, 32, index, offset, .align_mul = 16, .align_offset = 0, .range_base = 0, .range = 8);
    175  nir_load_ubo(&b, 1, 32, index, offset, .align_mul = 16, .align_offset = 0, .range_base = 0, .range = 8);

/xsrc/external/mit/MesaLib/dist/src/intel/vulkan/

anv_nir_lower_ubo_loads.c
     97  .align_mul = nir_intrinsic_align_mul(load),
    107  .align_mul = nir_intrinsic_align_mul(load),

/xsrc/external/mit/MesaLib/dist/src/amd/vulkan/

radv_acceleration_structure.c
    722  .align_mul = 2, .align_offset = 0);
    730  b, 3, 32, nir_iadd(b, addr, nir_u2u64(b, index_id)), .align_mul = 4, .align_offset = 0);
    751  .align_mul = 1, .align_offset = 0);
    786  .align_mul = 4, .align_offset = 0),
    805  .align_mul = comp_bytes, .align_offset = 0);
    987  .align_mul = 4, .align_offset = 0),
    992  .align_mul = 4, .align_offset = 0),
    997  .align_mul = 4, .align_offset = 0),
   1015  .write_mask = 15, .align_mul = 16, .align_offset = 0);
   1019  nir_build_store_global(&b, node_id, scratch_addr, .write_mask = 1, .align_mul
   [all...]

radv_query.c
     80  .align_mul = 8);
     84  nir_store_ssbo(b, value32, dst_buf, offset, .write_mask = 0x1, .align_mul = 4);
    176  nir_ssa_def *load = nir_load_ssbo(&b, 2, 64, src_buf, load_offset, .align_mul = 16);
    210  .align_mul = 8);
    215  .write_mask = 0x1, .align_mul = 8);
    296  nir_ssa_def *available32 = nir_load_ssbo(&b, 1, 32, src_buf, avail_offset, .align_mul = 4);
    314  nir_ssa_def *start = nir_load_ssbo(&b, 1, 64, src_buf, start_offset, .align_mul = 8);
    319  nir_ssa_def *end = nir_load_ssbo(&b, 1, 64, src_buf, end_offset, .align_mul = 8);
    327  .align_mul = 8);
    332  .write_mask = 0x1, .align_mul
   [all...]

radv_pipeline_rt.c
    319  nir_load_scratch(b, 1, 32, nir_load_var(b, vars->stack_ptr), .align_mul = 16), 1);
    352  nir_build_load_global(b, 1, 32, load_addr, .align_mul = 4, .align_offset = 0);
    397  .align_mul = 64, .align_offset = offset + i * 16);
    425  nir_load_var(&b_shader, vars->stack_ptr), .align_mul = 16,
    452  nir_load_var(&b_shader, vars->stack_ptr), .align_mul = 16,
    638  .align_mul = 4, .align_offset = 0);
    650  .align_mul = 64, .align_offset = 16),
    654  .align_mul = 64, .align_offset = 32),
    658  .align_mul = 64, .align_offset = 48)};
   1267  .align_mul
   [all...]

radv_meta_copy_vrs_htile.c
    122  nir_ssa_def *input_value = nir_load_ssbo(&b, 1, 32, htile_buf, htile_addr, .align_mul = 4);
    138  .access = ACCESS_NON_READABLE, .align_mul = 4);

radv_meta_buffer.c
     26  .access = ACCESS_NON_READABLE, .align_mul = 16);
     47  nir_ssa_def *load = nir_load_ssbo(&b, 4, 32, src_buf, offset, .align_mul = 16);
     49  .align_mul = 16);

/xsrc/external/mit/MesaLib/dist/src/gallium/drivers/radeonsi/

si_shaderlib_nir.c
     92  nir_ssa_def *value = nir_load_ssbo(&b, 1, 8, zero, src_offset, .align_mul=1);
     99  nir_store_ssbo(&b, value, zero, dst_offset, .write_mask=0x1, .align_mul=1);
    145  nir_store_ssbo(&b, clear_value, zero, offset, .write_mask=0x1, .align_mul=2);

/xsrc/external/mit/MesaLib/dist/src/amd/common/

ac_nir_lower_ngg.c
    249  nir_build_store_shared(b, nir_u2u8(b, surviving_invocations_in_current_wave), wave_id, .base = lds_addr_base, .align_mul = 1u, .write_mask = 0x1u);
    254  nir_ssa_def *packed_counts = nir_build_load_shared(b, 1, num_lds_dwords * 32, nir_imm_int(b, 0), .base = lds_addr_base, .align_mul = 8u);
    366  nir_build_store_shared(b, prim_id, addr, .write_mask = 1u, .align_mul = 4u);
    389  prim_id = nir_build_load_shared(b, 1, 32, addr, .align_mul = 4u);
    697  nir_build_store_shared(b, nir_u2u8(b, es_exporter_tid), es_vertex_lds_addr, .base = lds_es_exporter_tid, .align_mul = 1u, .write_mask = 0x1u);
    701  nir_build_store_shared(b, pos, exporter_addr, .base = lds_es_pos_x, .align_mul = 4u, .write_mask = 0xfu);
    706  nir_intrinsic_instr *store = nir_build_store_shared(b, arg_val, exporter_addr, .base = lds_es_arg_0 + 4u * i, .align_mul = 4u, .write_mask = 0x1u);
    724  nir_ssa_def *exported_pos = nir_build_load_shared(b, 4, 32, es_vertex_lds_addr, .base = lds_es_pos_x, .align_mul = 4u);
    729  nir_ssa_def *arg_val = nir_build_load_shared(b, 1, 32, es_vertex_lds_addr, .base = lds_es_arg_0 + 4u * i, .align_mul = 4u);
    748  nir_ssa_def *exporter_vtx_idx = nir_build_load_shared(b, 1, 8, vtx_addr, .base = lds_es_exporter_tid, .align_mul
   [all...]

ac_nir_lower_esgs_io_to_mem.c
    143  .align_mul = 16u, .align_offset = (nir_intrinsic_component(intrin) * 4u) % 16u);
    217  .align_mul = 16u, .align_offset = (nir_intrinsic_component(intrin) * 4u) % 16u);

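Both call sites encode the same layout fact: an ES output slot is 16 bytes, and a scalar at vector component c sits c * 4 bytes into it, hence .align_mul = 16 with .align_offset = (c * 4) % 16. A worked standalone check:

    #include <stdio.h>

    int main(void)
    {
       for (unsigned c = 0; c < 4; c++)
          printf("component %u -> align_offset %u of align_mul 16\n",
                 c, (c * 4u) % 16u);
       /* 0 -> 0, 1 -> 4, 2 -> 8, 3 -> 12: each component keeps a provable
        * 4-byte-aligned position inside the 16-byte slot */
       return 0;
    }
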
ac_nir_lower_tess_io_to_mem.c
    226  .align_mul = 16u, .align_offset = (nir_intrinsic_component(intrin) * 4u) % 16u);
    383  .align_mul = 16u, .align_offset = (nir_intrinsic_component(intrin) * 4u) % 16u);
    421  .align_mul = 16u, .align_offset = (nir_intrinsic_component(intrin) * 4u) % 16u);
    432  .align_mul = 16u, .align_offset = (nir_intrinsic_component(intrin) * 4u) % 16u);
    523  .align_mul = 16u, .align_offset = st->tcs_tess_lvl_out_loc % 16u);
    526  .align_mul = 16u, .align_offset = st->tcs_tess_lvl_in_loc % 16u)

/xsrc/external/mit/MesaLib/dist/src/intel/compiler/

brw_nir_lower_mem_access_bit_sizes.c
    169  const unsigned align_mul = nir_intrinsic_align_mul(intrin);   (local in function lower_mem_store_bit_size)
    210  (align_mul >= 4 && (align_offset + start) % 4 == 0) ||

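The quoted condition decides whether the piece of a store beginning at byte start may be emitted as a dword access: the intrinsic guarantees addr % align_mul == align_offset, and because align_mul is a power of two, align_mul >= 4 makes it a multiple of 4, so addr + start is 4-byte aligned exactly when (align_offset + start) % 4 == 0. Restated as standalone C with a hypothetical helper name:

    #include <stdbool.h>
    #include <stdio.h>

    static bool chunk_is_dword_aligned(unsigned align_mul, unsigned align_offset,
                                       unsigned start)
    {
       /* align_mul is a power of two, so >= 4 means "multiple of 4" */
       return align_mul >= 4 && (align_offset + start) % 4 == 0;
    }

    int main(void)
    {
       printf("%d\n", chunk_is_dword_aligned(8, 2, 6));   /* 1: 2 + 6 = 8 */
       printf("%d\n", chunk_is_dword_aligned(8, 2, 4));   /* 0: 2 + 4 = 6 */
       return 0;
    }
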
/xsrc/external/mit/MesaLib.old/dist/src/intel/compiler/

brw_nir_lower_mem_access_bit_sizes.c
    166  const unsigned align_mul = nir_intrinsic_align_mul(intrin);   (local in function lower_mem_store_bit_size)
    202  (align_mul >= 4 && (align_offset + start) % 4 == 0) ||

/xsrc/external/mit/MesaLib/dist/src/freedreno/ir3/

ir3_nir.c
    158  ir3_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,   (argument)
    171  assert(util_is_power_of_two_nonzero(align_mul));
    172  align_mul = MIN2(align_mul, 16);
    176  if (align_mul < 4)
    179  unsigned worst_start_offset = 16 - align_mul + align_offset;

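The ir3 callback bounds how many bytes of a merged access are guaranteed to fit before the next 16-byte boundary: with align_mul clamped to 16, an address satisfying addr % align_mul == align_offset can start at most 16 - align_mul + align_offset bytes into a 16-byte window. A worked standalone sketch:

    #include <stdio.h>

    int main(void)
    {
       unsigned align_mul = 8, align_offset = 4;   /* example guarantee */
       if (align_mul > 16)
          align_mul = 16;                          /* clamp as in ir3_nir.c */
       unsigned worst_start = 16 - align_mul + align_offset;
       printf("worst start %u, %u bytes left in the window\n",
              worst_start, 16 - worst_start);      /* 12, 4 */
       return 0;
    }
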
/xsrc/external/mit/MesaLib.old/dist/src/compiler/nir/

nir.h
   1374  * (X - align_offset) % align_mul == 0
   1491  INTRINSIC_IDX_ACCESSORS(align_mul, ALIGN_MUL, unsigned)
   1497  unsigned align_mul, unsigned align_offset)
   1499  assert(util_is_power_of_two_nonzero(align_mul));
   1500  assert(align_offset < align_mul);
   1501  nir_intrinsic_set_align_mul(intrin, align_mul);
   1515  const unsigned align_mul = nir_intrinsic_align_mul(intrin);   (local in function nir_intrinsic_align)
   1517  assert(align_offset < align_mul);
   1518  return align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;

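This header defines the convention every hit above relies on: each address X of the access satisfies (X - align_offset) % align_mul == 0, and nir_intrinsic_align() collapses the pair to a single usable power of two, namely align_mul itself when align_offset is zero, otherwise the lowest set bit of align_offset. Mirrored in standalone C with a hypothetical helper name and the compiler builtin in place of Mesa's ffs:

    #include <stdio.h>

    static unsigned collapse_align(unsigned align_mul, unsigned align_offset)
    {
       return align_offset ? 1u << (__builtin_ffs((int)align_offset) - 1)
                           : align_mul;
    }

    int main(void)
    {
       printf("%u\n", collapse_align(16, 0));  /* 16: X = 16k */
       printf("%u\n", collapse_align(16, 4));  /*  4: X = 16k + 4 is 4-aligned */
       printf("%u\n", collapse_align(16, 6));  /*  2: X = 16k + 6 is only 2-aligned */
       return 0;
    }
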
/xsrc/external/mit/MesaLib/dist/src/compiler/glsl/

gl_nir_lower_buffers.c
    237  * align_mul.
    239  cast->cast.align_mul = NIR_ALIGN_MUL_MAX;