Home | Sort by: relevance | last modified time | path
    Searched refs:ndelta (Results 1 - 18 of 18) sorted by relevancy

  /src/external/bsd/jemalloc/dist/src/
sc.c 17 reg_size_compute(int lg_base, int lg_delta, int ndelta) {
18 return (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta);
23 slab_size(int lg_page, int lg_base, int lg_delta, int ndelta) {
25 size_t reg_size = reg_size_compute(lg_base, lg_delta, ndelta);
34 * base + ndelta * delta == (ndelta + base/ndelta) * delta, which is
35 * (ndelta + ngroup) * delta. The way we choose slabbing strategies
36 * means that delta is at most the page size and ndelta < ngroup. So
60 int index, int lg_base, int lg_delta, int ndelta) {
102 int ndelta = 0; local
    [all...]
sz.c 63 + (ZU(sc->ndelta) << sc->lg_delta);
80 + (ZU(sc->ndelta) << (sc->lg_delta));
99 + (ZU(sc->ndelta) << sc->lg_delta);
bin_info.c 15 + ((size_t)sc->ndelta << sc->lg_delta);
arena.c 1779 (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta));
  /src/external/bsd/jemalloc.old/dist/src/
sc.c 17 reg_size_compute(int lg_base, int lg_delta, int ndelta) {
18 return (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta);
23 slab_size(int lg_page, int lg_base, int lg_delta, int ndelta) {
25 size_t reg_size = reg_size_compute(lg_base, lg_delta, ndelta);
34 * base + ndelta * delta == (ndelta + base/ndelta) * delta, which is
35 * (ndelta + ngroup) * delta. The way we choose slabbing strategies
36 * means that delta is at most the page size and ndelta < ngroup. So
60 int index, int lg_base, int lg_delta, int ndelta) {
102 int ndelta = 0; local
    [all...]
sz.c 63 + (ZU(sc->ndelta) << sc->lg_delta);
80 + (ZU(sc->ndelta) << (sc->lg_delta));
99 + (ZU(sc->ndelta) << sc->lg_delta);
bin_info.c 15 + ((size_t)sc->ndelta << sc->lg_delta);
arena.c 1779 (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta));
  /src/external/bsd/jemalloc.old/dist/include/jemalloc/internal/
size_classes.sh 57 ndelta=$3
61 reg_size=$((${grp} + ${delta}*${ndelta}))
68 ndelta=$4
71 reg_size_compute ${lg_grp} ${lg_delta} ${ndelta}
95 ndelta=$4
105 sz=$((${grp} + ${delta} * ${ndelta}))
114 lg ${ndelta}; lg_ndelta=${lg_result}; pow2 ${lg_ndelta}
115 if [ ${pow2_result} -lt ${ndelta} ] ; then
131 slab_size ${lg_p} ${lg_grp} ${lg_delta} ${ndelta}; pgs=${slab_size_pgs}
142 printf ' SC(%3d, %6d, %8d, %6d, %3s, %3s, %3d, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${psz} ${bin} ${pgs} ${lg_delta_lookup}
    [all...]
sc.h 139 * lg_delta, and ndelta (i.e. number of deltas above the base) on a
141 * classes, size == (1 << lg_base) + ndelta * (1 << lg_delta).
144 * lg_delta is lg_base - SC_LG_NGROUP, and ndelta goes from 1 to SC_NGROUP.
148 * subsequent ones. ndelta is always 0.
151 * lg_base == LG_QUANTUM, lg_delta == LG_QUANTUM, and have ndelta range from 0
157 * lg_delta == LG_QUANTUM - 1, and ndelta == 1, keeping the rest of the
298 /* Delta multiplier. size == 1<<lg_base + ndelta<<lg_delta */
299 int ndelta; member in struct:sc_s
347 size_t reg_size_compute(int lg_base, int lg_delta, int ndelta);
  /src/external/bsd/jemalloc/dist/test/unit/
sc.c 9 + (ZU(tiny->ndelta) << tiny->lg_delta);
18 + (ZU(sc->ndelta) << sc->lg_delta);
sz.c 31 data.sc[base_ind].ndelta) < base_psz) {
36 data.sc[base_ind].lg_delta, data.sc[base_ind].ndelta),
48 gt_sc.ndelta),
53 le_sc.ndelta),
  /src/external/bsd/jemalloc.old/dist/test/unit/
sc.c 9 + (ZU(tiny->ndelta) << tiny->lg_delta);
18 + (ZU(sc->ndelta) << sc->lg_delta);
sz.c 31 data.sc[base_ind].ndelta) < base_psz) {
36 data.sc[base_ind].lg_delta, data.sc[base_ind].ndelta),
48 gt_sc.ndelta),
53 le_sc.ndelta),
  /src/usr.sbin/timed/timed/
correct.c 122 long ndelta; local
143 ndelta = (unsigned long)delta >> smoother;
148 ndelta |= mask;
153 delta, ndelta);
154 adj.tv_usec = ndelta;
  /src/external/bsd/jemalloc/dist/include/jemalloc/internal/
sc.h 139 * lg_delta, and ndelta (i.e. number of deltas above the base) on a
141 * classes, size == (1 << lg_base) + ndelta * (1 << lg_delta).
144 * lg_delta is lg_base - SC_LG_NGROUP, and ndelta goes from 1 to SC_NGROUP.
148 * subsequent ones. ndelta is always 0.
151 * lg_base == LG_QUANTUM, lg_delta == LG_QUANTUM, and have ndelta range from 0
157 * lg_delta == LG_QUANTUM - 1, and ndelta == 1, keeping the rest of the
298 /* Delta multiplier. size == 1<<lg_base + ndelta<<lg_delta */
299 int ndelta; member in struct:sc_s
347 size_t reg_size_compute(int lg_base, int lg_delta, int ndelta);
  /src/external/bsd/jemalloc/include/jemalloc/internal/
sc.h 139 * lg_delta, and ndelta (i.e. number of deltas above the base) on a
141 * classes, size == (1 << lg_base) + ndelta * (1 << lg_delta).
144 * lg_delta is lg_base - SC_LG_NGROUP, and ndelta goes from 1 to SC_NGROUP.
148 * subsequent ones. ndelta is always 0.
151 * lg_base == LG_QUANTUM, lg_delta == LG_QUANTUM, and have ndelta range from 0
157 * lg_delta == LG_QUANTUM - 1, and ndelta == 1, keeping the rest of the
298 /* Delta multiplier. size == 1<<lg_base + ndelta<<lg_delta */
299 int ndelta; member in struct:sc_s
347 size_t reg_size_compute(int lg_base, int lg_delta, int ndelta);
  /src/external/bsd/jemalloc.old/include/jemalloc/internal/
sc.h 139 * lg_delta, and ndelta (i.e. number of deltas above the base) on a
141 * classes, size == (1 << lg_base) + ndelta * (1 << lg_delta).
144 * lg_delta is lg_base - SC_LG_NGROUP, and ndelta goes from 1 to SC_NGROUP.
148 * subsequent ones. ndelta is always 0.
151 * lg_base == LG_QUANTUM, lg_delta == LG_QUANTUM, and have ndelta range from 0
157 * lg_delta == LG_QUANTUM - 1, and ndelta == 1, keeping the rest of the
298 /* Delta multiplier. size == 1<<lg_base + ndelta<<lg_delta */
299 int ndelta; member in struct:sc_s
347 size_t reg_size_compute(int lg_base, int lg_delta, int ndelta);

Completed in 23 milliseconds