/xsrc/external/mit/MesaLib/dist/src/compiler/nir/
nir_lower_load_const_to_scalar.c
    30   * Replaces vector nir_load_const instructions with a series of loads and a
    35   * same value was used in different vector constant loads.
    48      /* Emit the individual loads. */
    49      nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];    [local in function lower_load_const_instr_scalar]
    55      loads[i] = &load_comp->def;
    59      nir_ssa_def *vec = nir_vec(&b, loads, lower->def.num_components);
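The matched comments describe the whole transform: one vector nir_load_const becomes a
series of single-component constant loads plus a nir_vec that reassembles them, giving CSE
a chance to merge identical scalar constants. A minimal sketch of the emission loop,
assuming this snapshot's NIR builder API (the function name is illustrative, not the
pass's real helper; the caller would rewrite the old def's uses to the returned vec):

    #include "nir_builder.h"

    /* A minimal sketch: emit one single-component nir_load_const per channel of
     * `lower` and reassemble them with nir_vec().  The caller is expected to
     * rewrite the old def's uses to the returned vec and remove the original
     * instruction. */
    static nir_ssa_def *
    build_scalarized_load_const(nir_builder *b, nir_load_const_instr *lower)
    {
       nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];

       b->cursor = nir_before_instr(&lower->instr);

       for (unsigned i = 0; i < lower->def.num_components; i++) {
          nir_load_const_instr *load_comp =
             nir_load_const_instr_create(b->shader, 1, lower->def.bit_size);
          load_comp->value[0] = lower->value[i];
          nir_builder_instr_insert(b, &load_comp->instr);
          loads[i] = &load_comp->def;
       }

       return nir_vec(b, loads, lower->def.num_components);
    }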
nir_lower_io_to_scalar.c
    41      nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];    [local in function lower_load_input_to_scalar]
    59      loads[i] = &chan_intr->dest.ssa;
    63      nir_vec(b, loads, intr->num_components));
    185     nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];    [local in function lower_load_to_scalar_early]
    229     loads[i] = &chan_intr->dest.ssa;
    233     nir_vec(b, loads, intr->num_components));
nir_lower_vars_to_ssa.c
    48      struct set *loads;    [member in struct deref_node]
    84   * At the moment, we only lower loads, stores, and copies that can be
    85   * trivially lowered to loads and stores, i.e. copies with no indirects
    88   * wildcards, then we lower that copy operation to loads and stores, but
    90   * used in these loads, stores, and trivial copies are ones with no
    414     if (node->loads == NULL)
    415        node->loads = _mesa_pointer_set_create(state->dead_ctx);
    417     _mesa_set_add(node->loads, load_instr);
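The loads set on deref_node is the pass's per-variable bookkeeping: every load it sees is
recorded so the variable's lowering can account for it later. A stripped-down sketch of
that registration step, using the util/set helpers the matched lines already call (the
struct and function here are simplified stand-ins, not the pass's real deref_node):

    #include "nir.h"
    #include "util/set.h"

    /* Simplified stand-in for the pass's per-variable bookkeeping; the real
     * deref_node carries more state (stores, copies, children, ...). */
    struct deref_node_sketch {
       struct set *loads;   /* every load instruction seen for this deref */
    };

    static void
    register_load_sketch(struct deref_node_sketch *node, void *dead_ctx,
                         nir_intrinsic_instr *load_instr)
    {
       /* The set is created lazily, as on lines 414-415 above. */
       if (node->loads == NULL)
          node->loads = _mesa_pointer_set_create(dead_ctx);

       /* The pass later consults this set when deciding how to lower the
        * variable and its recorded loads. */
       _mesa_set_add(node->loads, load_instr);
    }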
nir_opt_load_store_vectorize.c
    26   * intersecting and identical loads/stores. It currently supports derefs, ubo,
    27   * ssbo and push constant loads/stores.
    38   * - It won't turn four consecutive vec3 loads into 3 vec4 loads.
    59      int base_src;  /* offset which it loads/stores from */
    60      int deref_src; /* deref which is loads/stores from */
    192     struct hash_table *loads[nir_num_variable_modes];    [member in struct vectorize_ctx]
    734     /* update the load's destination size and extract data for each of the original loads */
    1056 * and high loads.
    1109    /* we can only vectorize non-volatile loads/store
    ...
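The comment on line 734 names the interesting rewrite: once two loads are proven mergeable,
the surviving load's destination is widened and each original load's users are pointed at a
slice of the wider value. A sketch of that slicing, assuming the nir_channels() and
nir_after_instr() builder helpers; the function and parameter names are illustrative, not
the pass's own:

    #include "nir_builder.h"

    /* Illustrative only: after the vectorizer widens the surviving load, each
     * of the two original loads is replaced by a slice of the wide value. */
    static void
    split_wide_load_back_sketch(nir_builder *b, nir_intrinsic_instr *wide_load,
                                unsigned low_comps, unsigned high_comps,
                                nir_ssa_def **out_low, nir_ssa_def **out_high)
    {
       b->cursor = nir_after_instr(&wide_load->instr);

       /* Channels [0, low_comps) stand in for the lower-offset load ... */
       *out_low = nir_channels(b, &wide_load->dest.ssa,
                               (nir_component_mask_t)((1u << low_comps) - 1));

       /* ... and channels [low_comps, low_comps + high_comps) for the other.
        * The pass then rewrites each original load's uses to these defs. */
       *out_high = nir_channels(b, &wide_load->dest.ssa,
                                (nir_component_mask_t)(((1u << high_comps) - 1) << low_comps));
    }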
/xsrc/external/mit/MesaLib.old/dist/src/compiler/nir/
nir_lower_load_const_to_scalar.c
    30   * Replaces vector nir_load_const instructions with a series of loads and a
    35   * same value was used in different vector constant loads.
    48      /* Emit the individual loads. */
    49      nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];    [local in function lower_load_const_instr_scalar]
    55      loads[i] = &load_comp->def;
    59      nir_ssa_def *vec = nir_vec(&b, loads, lower->def.num_components);
nir_lower_io_to_scalar.c
    41      nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];    [local in function lower_load_input_to_scalar]
    57      loads[i] = &chan_intr->dest.ssa;
    61      nir_src_for_ssa(nir_vec(b, loads,
    180     nir_ssa_def *loads[NIR_MAX_VEC_COMPONENTS];    [local in function lower_load_to_scalar_early]
    223     loads[i] = &chan_intr->dest.ssa;
    227     nir_src_for_ssa(nir_vec(b, loads,
nir_lower_vars_to_ssa.c
    48      struct set *loads;    [member in struct deref_node]
    76   * At the moment, we only lower loads, stores, and copies that can be
    77   * trivially lowered to loads and stores, i.e. copies with no indirects
    80   * wildcards, then we lower that copy operation to loads and stores, but
    82   * used in these loads, stores, and trivial copies are ones with no
    378     if (node->loads == NULL)
    379        node->loads = _mesa_pointer_set_create(state->dead_ctx);
    381     _mesa_set_add(node->loads, load_instr);
/xsrc/external/mit/MesaLib.old/dist/src/gallium/drivers/lima/ir/
lima_nir_lower_uniform_to_scalar.c
    34      nir_ssa_def *loads[4];    [local in function lower_load_uniform_to_scalar]
    50      loads[i] = &chan_intr->dest.ssa;
    54      nir_src_for_ssa(nir_vec(b, loads,
/xsrc/external/mit/MesaLib/dist/src/gallium/drivers/lima/ir/
lima_nir_lower_uniform_to_scalar.c
    34      nir_ssa_def *loads[4];    [local in function lower_load_uniform_to_scalar]
    51      loads[i] = &chan_intr->dest.ssa;
    55      nir_vec(b, loads, intr->num_components));
/xsrc/external/mit/MesaLib/dist/src/intel/compiler/
brw_nir_lower_mem_access_bit_sizes.c
    117     /* Otherwise, we have to break it into smaller loads. We could end up
    118      * with as many as 32 loads if we're loading a u64vec16 from scratch.
    120     nir_ssa_def *loads[32];    [local in function lower_mem_load_bit_size]
    137     loads[num_loads++] = dup_mem_intrinsic(b, intrin, NULL, load_offset,
    143     assert(num_loads <= ARRAY_SIZE(loads));
    144     result = nir_extract_bits(b, loads, num_loads, 0,
    275  * This pass loads arbitrary SSBO and shared memory load/store operations to
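The matched comment spells out the fallback strategy: a load whose size or alignment the
hardware cannot do directly is re-issued as up to 32 smaller loads, whose bits are then
stitched back together with nir_extract_bits(). A reduced sketch of that loop, with a
caller-supplied callback standing in for the pass's dup_mem_intrinsic() helper (names and
parameters are illustrative, not the real pass's):

    #include <assert.h>
    #include "nir_builder.h"

    /* Hypothetical callback standing in for the pass's dup_mem_intrinsic():
     * re-issues one naturally aligned chunk of the original access. */
    typedef nir_ssa_def *(*load_chunk_fn)(nir_builder *b, unsigned byte_offset,
                                          unsigned chunk_bytes);

    static nir_ssa_def *
    lower_wide_load_sketch(nir_builder *b, load_chunk_fn load_chunk,
                           unsigned bytes_needed, unsigned chunk_bytes,
                           unsigned dest_num_components, unsigned dest_bit_size)
    {
       nir_ssa_def *loads[32];
       unsigned num_loads = 0;

       /* Cover the requested byte range with smaller, supported loads. */
       for (unsigned offset = 0; offset < bytes_needed; offset += chunk_bytes) {
          assert(num_loads < 32); /* a u64vec16 from scratch is the worst case */
          loads[num_loads++] = load_chunk(b, offset, chunk_bytes);
       }

       /* Treat the chunk results as one contiguous bit string and carve out
        * the originally requested vector. */
       return nir_extract_bits(b, loads, num_loads, 0,
                               dest_num_components, dest_bit_size);
    }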
/xsrc/external/mit/MesaLib/dist/src/compiler/nir/tests/
load_store_vectorizer_tests.cpp
    84      std::map<unsigned, nir_alu_src*> loads;    [member in class nir_load_store_vectorize_test]
    255     loads[id] = &mov->src[0];
    322     loads[id] = &mov->src[0];
    452     ASSERT_EQ(loads[0x1]->src.ssa, &load->dest.ssa);
    453     ASSERT_EQ(loads[0x2]->src.ssa, &load->dest.ssa);
    454     ASSERT_EQ(loads[0x1]->swizzle[0], 0);
    455     ASSERT_EQ(loads[0x1]->swizzle[1], 1);
    456     ASSERT_EQ(loads[0x1]->swizzle[2], 2);
    457     ASSERT_EQ(loads[0x1]->swizzle[3], 3);
    458     ASSERT_EQ(loads[
    ...
/xsrc/external/mit/MesaLib/dist/docs/relnotes/
20.3.3.rst
    37   - star conflict crashes on iris, but loads fine on i965, on HD 5500
    129  - radv/llvm,aco: always split typed vertex buffer loads on GFX6 and GFX10+
10.2.rst
    61   gpu driver(svga), whereas now it loads a shared pipe_*.so driver.
17.2.8.rst
    70   - i965/vec4: use a temp register to compute offsets for pull loads
19.2.5.rst
    93   - tgsi_to_nir: fix masked out image loads
21.1.8.rst
    85   - nir/lower_vectorize_tess_levels: set num_components for vectorized loads
12.0.4.rst
    193  - radeonsi: fix FP64 UBO loads with indirect uniform block indexing
    245  - radeonsi: fix indirect loads of 64 bit constants
    250  - radeonsi: fix 64-bit loads from LDS
18.0.2.rst
    58   - ac/nir: Make the GFX9 buffer size fix apply to image loads/atomics
20.1.5.rst
    59   - aco: fix scratch loads which cross element_size boundaries
10.2.2.rst
    127  - i965/vec4: Use the sampler for pull constant loads on Broadwell.
13.0.1.rst
    141  - st/glsl_to_tgsi: fix dvec[34] loads from SSBO
/xsrc/external/mit/MesaLib/dist/src/amd/llvm/
ac_llvm_build.c
    1452 * fixups suitable for vertex fetch, using non-format buffer loads.
    1499    LLVMValueRef loads[32]; /* up to 32 bytes */    [local in function ac_build_opencoded_load_format]
    1506    loads[i] =
    1510    loads[i] = ac_to_integer(ctx, loads[i]);
    1520    tmp = LLVMBuildZExt(ctx->builder, loads[src], dst_type, "");
    1528    loads[dst] = accum;
    1534    LLVMValueRef loaded = loads[0];
    1541    loads[i] = LLVMBuildExtractElement(ctx->builder, loaded, tmp, "");
    1551    LLVMValueRef loaded = loads[sr
    ...
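The snippets around lines 1520-1528 show how the opencoded format load assembles wider
components from individual byte loads: zero-extend, shift into position, OR into an
accumulator. A self-contained illustration of that accumulation using the LLVM-C builder
API (names are illustrative, and the real function handles many more cases):

    #include <llvm-c/Core.h>

    /* Illustrative accumulation of byte loads (i8 values, num_bytes <= 4) into
     * one i32, in the spirit of the zext/shl/or sequence above; this is a
     * sketch, not ac_llvm_build.c's code. */
    static LLVMValueRef
    assemble_bytes_sketch(LLVMContextRef ctx, LLVMBuilderRef builder,
                          LLVMValueRef *byte_loads, unsigned num_bytes)
    {
       LLVMTypeRef dst_type = LLVMInt32TypeInContext(ctx);
       LLVMValueRef accum = LLVMConstInt(dst_type, 0, 0);

       for (unsigned i = 0; i < num_bytes; i++) {
          /* Zero-extend the byte to the destination width ... */
          LLVMValueRef tmp = LLVMBuildZExt(builder, byte_loads[i], dst_type, "");
          /* ... shift it into its byte position ... */
          tmp = LLVMBuildShl(builder, tmp, LLVMConstInt(dst_type, 8 * i, 0), "");
          /* ... and OR it into the accumulated dword. */
          accum = LLVMBuildOr(builder, accum, tmp, "");
       }
       return accum;
    }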
/xsrc/external/mit/MesaLib/dist/src/gallium/frontends/clover/nir/
invocation.cpp
    276     nir_ssa_def *loads[3];    [local in function clover_lower_nir_instr]
    300     loads[i] = var ? nir_load_var(b, var) : nir_imm_int(b, 0);
    303     return nir_u2u(b, nir_vec(b, loads, state->global_dims),
/xsrc/external/mit/MesaLib/dist/src/gallium/drivers/zink/
zink_compiler.c
    588     nir_ssa_def *loads[4];    [local in function lower_attrib]
    590     loads[i] = nir_load_deref(b, nir_build_deref_var(b, split[i+1]));
    593     loads[3] = nir_channel(b, loads[0], 3);
    594     loads[0] = nir_channel(b, loads[0], 0);
    596     nir_ssa_def *new_load = nir_vec(b, loads, num_components);
/xsrc/external/mit/libdrm/dist/man/
drm.7.rst
    35   When a GPU is detected, the DRM system loads a driver for the detected