HomeSort by: relevance | last modified time | path
    Searched refs:vectorize (Results 1 - 25 of 44) sorted by relevancy

1 2

  /src/external/gpl2/gettext/dist/gettext-tools/src/
format-perl.c 152 bool vectorize = false; local
186 vectorize = true;
196 vectorize = true;
227 vectorize = true;
245 if (vectorize)
445 if (type != FAT_NONE && !vectorize)
  /src/external/apache2/llvm/dist/llvm/utils/gn/build/
write_library_dependencies.py 25 { "all", nullptr, true, {"demangle", "support", "tablegen", "core", "fuzzmutate", "filecheck", "interfacestub", "irreader", "codegen", "selectiondag", "asmprinter", "mirparser", "globalisel", "binaryformat", "bitreader", "bitwriter", "bitstreamreader", "dwarflinker", "extensions", "frontendopenmp", "transformutils", "instrumentation", "aggressiveinstcombine", "instcombine", "scalaropts", "ipo", "vectorize", "objcarcopts", "coroutines", "cfguard", "linker", "analysis", "lto", "mc", "mcparser", "mcdisassembler", "mca", "object", "objectyaml", "option", "remarks", "debuginfodwarf", "debuginfogsym", "debuginfomsf", "debuginfocodeview", "debuginfopdb", "symbolize", "executionengine", "interpreter", "jitlink", "mcjit", "orcjit", "orcshared", "orctargetprocess", "runtimedyld", "target", "asmparser", "lineeditor", "profiledata", "coverage", "passes", "textapi", "dlltooldriver", "libdriver", "xray", "windowsmanifest"} },
58 { "ipo", "LLVMipo", true, {"aggressiveinstcombine", "analysis", "bitreader", "bitwriter", "core", "frontendopenmp", "instcombine", "irreader", "linker", "object", "profiledata", "scalaropts", "support", "transformutils", "vectorize", "instrumentation"} },
80 { "passes", "LLVMPasses", true, {"aggressiveinstcombine", "analysis", "core", "coroutines", "ipo", "instcombine", "objcarcopts", "scalaropts", "support", "target", "transformutils", "vectorize", "instrumentation"} },
92 { "vectorize", "LLVMVectorize", true, {"analysis", "core", "support", "transformutils"} },
  /src/external/gpl3/gcc/dist/gcc/
optabs-tree.cc 600 vmode = targetm.vectorize.preferred_simd_mode (smode);
602 && targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
607 targetm.vectorize.autovectorize_vector_modes (&vector_modes, true);
610 && targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
637 if (targetm.vectorize.get_mask_mode (mode).exists (&mask_mode)
optabs-query.cc 449 if (targetm.vectorize.vec_perm_const != NULL)
451 if (targetm.vectorize.vec_perm_const (mode, op_mode, NULL_RTX, NULL_RTX,
tree-vect-data-refs.cc 286 /* If there is a loop invariant read involved we might vectorize it in
357 in this case, and vectorize the fallback loop with the
746 "dependencies of %G to vectorize "
761 store is marked volatile we don't vectorize the loop
788 " to vectorize the early exit. %G may alias with"
1221 targetm.vectorize.preferred_vector_alignment (vectype)))
1263 /* If the region we're going to vectorize is reached, all unconditional
1333 = exact_div (targetm.vectorize.preferred_vector_alignment (vectype),
1445 the analysis phase, before deciding to vectorize the loop. */
1686 return targetm.vectorize.vector_alignment_reachable (type, is_packed)
    [all...]
tree-vectorizer.h 892 /* When the loop has early breaks that we can vectorize we need to peel
916 (or #pragma simd or #pragma ivdep) we can vectorize this and it will
918 vectorize this, so this field would be false. */
1193 /* Describes how we're going to vectorize an individual load or store,
1343 /* For stores, number of stores from this group seen. We vectorize the last
1843 /* Alias targetm.vectorize.builtin_vectorization_cost. */
1849 return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
1861 /* Alias targetm.vectorize.init_cost. */
1866 return targetm.vectorize.create_costs (vinfo, costing_for_scalar);
1873 /* Alias targetm.vectorize.add_stmt_cost. */
    [all...]
target.def 1788 HOOK_VECTOR (TARGET_VECTORIZE, vectorize)
2053 The default is @code{NULL_TREE} which means to not vectorize gather\n\
2065 The default is @code{NULL_TREE} which means to not vectorize scatter\n\
2086 HOOK_VECTOR_END (vectorize)
tree-vectorizer.cc 952 /* If we are going to vectorize outer loop, prevent vectorization
1046 /* Try to vectorize LOOP. */
1077 "couldn't vectorize loop\n");
1088 /* If we applied if-conversion then try to vectorize the
1090 ??? Ideally BB vectorization would learn to vectorize
1138 loop, don't vectorize its inner loop; we'll attempt to
1139 vectorize LOOP_VECTORIZED guarded inner loop of the scalar
1175 /* Try to vectorize LOOP. */
1293 /* Make sure we don't vectorize it twice. */
1381 && targetm.vectorize.empty_mask_is_expensive (IFN_MASK_STORE)
    [all...]
internal-fn.cc 4928 if (!targetm.vectorize.get_mask_mode (mode).exists (&mask_mode))
4988 machine_mode vmode = targetm.vectorize.preferred_simd_mode (smode);
4997 targetm.vectorize.autovectorize_vector_modes (&vector_modes, true);
optabs.cc 6530 if (targetm.vectorize.vec_perm_const != NULL)
6537 if (targetm.vectorize.vec_perm_const (mode, op_mode, target, v0, v1,
6551 if (targetm.vectorize.vec_perm_const != NULL
6552 && targetm.vectorize.vec_perm_const (qimode, qimode, target_qi, v0_qi,
  /src/external/gpl3/gcc.old/dist/gcc/
optabs-query.cc 449 if (targetm.vectorize.vec_perm_const != NULL)
451 if (targetm.vectorize.vec_perm_const (mode, NULL_RTX, NULL_RTX,
584 vmode = targetm.vectorize.preferred_simd_mode (smode);
586 && targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
591 targetm.vectorize.autovectorize_vector_modes (&vector_modes, true);
594 && targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
tree-vect-data-refs.cc 308 in this case, and vectorize the fallback loop with the
925 targetm.vectorize.preferred_vector_alignment (vectype)))
967 /* If the region we're going to vectorize is reached, all unconditional
1037 = exact_div (targetm.vectorize.preferred_vector_alignment (vectype),
1149 the analysis phase, before deciding to vectorize the loop. */
1390 return targetm.vectorize.vector_alignment_reachable (type, is_packed);
2568 = exact_div (targetm.vectorize.preferred_vector_alignment (vectype),
4199 ? (targetm.vectorize.builtin_gather
4200 && targetm.vectorize.builtin_gather (vectype,
4203 : (targetm.vectorize.builtin_scatter
    [all...]
tree-vectorizer.h 837 (or #pragma simd or #pragma ivdep) we can vectorize this and it will
839 vectorize this, so this field would be false. */
1073 /* Describes how we're going to vectorize an individual load or store,
1222 /* For stores, number of stores from this group seen. We vectorize the last
1721 /* Alias targetm.vectorize.builtin_vectorization_cost. */
1727 return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
1739 /* Alias targetm.vectorize.init_cost. */
1744 return targetm.vectorize.create_costs (vinfo, costing_for_scalar);
1751 /* Alias targetm.vectorize.add_stmt_cost. */
1777 /* Alias targetm.vectorize.add_stmt_cost. */
    [all...]
target.def 1785 HOOK_VECTOR (TARGET_VECTORIZE, vectorize)
2034 The default is @code{NULL_TREE} which means to not vectorize gather\n\
2046 The default is @code{NULL_TREE} which means to not vectorize scatter\n\
2067 HOOK_VECTOR_END (vectorize)
omp-general.cc 960 targetm.vectorize.autovectorize_vector_modes (&modes, true);
972 machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
tree-vectorizer.cc 952 /* If we are going to vectorize outer loop, prevent vectorization
1030 /* Try to vectorize LOOP. */
1060 "couldn't vectorize loop\n");
1071 /* If we applied if-conversion then try to vectorize the
1073 ??? Ideally BB vectorization would learn to vectorize
1119 loop, don't vectorize its inner loop; we'll attempt to
1120 vectorize LOOP_VECTORIZED guarded inner loop of the scalar
1155 /* Try to vectorize LOOP. */
1273 /* Make sure we don't vectorize it twice. */
1356 && targetm.vectorize.empty_mask_is_expensive (IFN_MASK_STORE)
    [all...]
tree-vect-stmts.cc 602 inductions. Otherwise we'll needlessly vectorize the IV increment
1304 if (targetm.vectorize.builtin_mask_for_load)
1333 if (targetm.vectorize.builtin_mask_for_load)
1654 /* We want to vectorize a call to combined function CFN with function
1808 if (targetm.vectorize.get_mask_mode (vecmode).exists (&mask_mode)
1867 /* Determine whether we can use a gather load or scatter store to vectorize
1963 vectorize STMT_INFO, which is a grouped or strided load or store.
2211 /* For SLP vectorization we directly vectorize a subchain
3262 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
3327 interesting builtin functions to vectorize with more than two argument
    [all...]
optabs.cc 6271 if (targetm.vectorize.vec_perm_const != NULL)
6276 if (targetm.vectorize.vec_perm_const (mode, target, v0, v1, indices))
6289 if (targetm.vectorize.vec_perm_const != NULL
6290 && targetm.vectorize.vec_perm_const (qimode, target_qi, v0_qi,
  /src/external/gpl3/gcc/lib/crtstuff/
Makefile 27 -fno-tree-vectorize \
  /src/external/gpl3/gcc.old/lib/crtstuff/
Makefile 27 -fno-tree-vectorize \
  /src/external/gpl3/gcc/lib/libgfortran/
Makefile 150 FOPTS.${_f}+= -ffast-math -ftree-vectorize -funroll-loops --param max-unroll-times=4
155 FOPTS.${_f}+= -ffast-math -ftree-vectorize -funroll-loops --param max-unroll-times=4 -mprefer-avx128
  /src/external/bsd/zstd/dist/lib/
Makefile 68 decompress/zstd_decompress_block.o : CFLAGS+=-fno-tree-vectorize
  /src/external/gpl3/gcc/dist/gcc/config/riscv/
riscv-vector-costs.cc 55 - Collect all vectorize STMTs locally for each loop block.
56 - Build program point based graph, ignore non-vectorize STMTs:
58 vectorize STMT 0 - point 0
60 vectorize STMT 1 - point 1
1035 /* If NITERS is unknown, we should not use VLS modes to vectorize
1140 = targetm.vectorize.builtin_vectorization_cost (kind, vectype, misalign);
  /src/external/gpl3/gcc/dist/libgfortran/
Makefile.am 1079 $(patsubst %.c,%.lo,$(notdir $(i_matmul_c))): AM_CFLAGS += -ffast-math -ftree-vectorize -funroll-loops --param max-unroll-times=4
1083 $(patsubst %.c,%.lo,$(notdir $(i_matmulavx128_c))): AM_CFLAGS += -ffast-math -ftree-vectorize -funroll-loops --param max-unroll-times=4 -mprefer-avx128
1085 # Logical matmul doesn't vectorize.
  /src/external/gpl3/gcc.old/dist/libgfortran/
Makefile.am 1074 $(patsubst %.c,%.lo,$(notdir $(i_matmul_c))): AM_CFLAGS += -ffast-math -ftree-vectorize -funroll-loops --param max-unroll-times=4
1078 $(patsubst %.c,%.lo,$(notdir $(i_matmulavx128_c))): AM_CFLAGS += -ffast-math -ftree-vectorize -funroll-loops --param max-unroll-times=4 -mprefer-avx128
1080 # Logical matmul doesn't vectorize.

Completed in 96 milliseconds

1 2