    Searched defs:bucket (Results 1 - 15 of 15) sorted by relevance

  /src/sys/dev/raidframe/
rf_debugMem.c 157 size_t bucket = (size_t)HASHADDR(addr); local in function:memory_hash_insert
163 for (p = mh_table[bucket]; p && (p->address != addr); p = p->next)
168 p->next = mh_table[bucket];
169 mh_table[bucket] = p;
189 size_t bucket = HASHADDR(addr); local in function:memory_hash_remove
193 for (p = mh_table[bucket]; p && (p->address != addr); p = p->next)
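The rf_debugMem.c hits above show a classic address-keyed chained hash table: HASHADDR() picks a bucket, the chain is searched for an existing record, and new records are pushed onto the head of the chain. A minimal user-space sketch of the same insert pattern follows; the hash macro, table size, and record layout here are illustrative assumptions, not the RAIDframe definitions.

    #include <stdlib.h>

    #define NBUCKETS        1024
    #define HASHADDR(a)     ((((size_t)(a)) >> 3) % NBUCKETS)  /* assumed hash */

    struct memrec {
            void *address;
            size_t size;
            struct memrec *next;
    };

    static struct memrec *mh_table[NBUCKETS];

    static void
    memory_hash_insert(void *addr, size_t size)
    {
            size_t bucket = HASHADDR(addr);
            struct memrec *p;

            /* Reuse the record for this address if one is already chained. */
            for (p = mh_table[bucket]; p && p->address != addr; p = p->next)
                    continue;
            if (p == NULL) {
                    if ((p = malloc(sizeof(*p))) == NULL)
                            return;                 /* sketch: ignore failure */
                    p->address = addr;
                    p->next = mh_table[bucket];     /* push onto the chain head */
                    mh_table[bucket] = p;
            }
            p->size = size;
    }
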
  /src/usr.sbin/bootp/common/
hash.c 101 * Frees an entire linked list of bucket members (used in the open
122 * memory and resets all bucket pointers to NULL.
205 * to determine the bucket number, and "compare" and "key" to determine
209 * already exists in the given bucket of the hash table, or some other error
331 unsigned bucket;
344 * until we find a new chain (non-empty bucket) or run out of buckets.
346 bucket = hashtable->bucketnum + 1;
347 while ((bucket < hashtable->size) &&
348 !(memberptr = (hashtable->table)[bucket])) {
349 bucket++;
323 unsigned bucket; local in function:hash_NextEntry
    [all...]
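The bootp hash.c comments describe the iterator behaviour of hash_NextEntry(): once the current chain is exhausted, it scans forward from bucketnum + 1 until it finds a non-empty bucket or runs out of buckets. A simplified sketch of that walk, with stand-in type and field names rather than the real bootp structures:

    #include <stddef.h>

    struct member {
            struct member *next;
            void *data;
    };

    struct hashtab {
            unsigned size;                  /* number of buckets */
            unsigned bucketnum;             /* bucket of the entry last returned */
            struct member *curmember;       /* next member within that bucket */
            struct member **table;
    };

    /* Assumes a hash_FirstEntry()-style call already primed bucketnum
     * and curmember. */
    static void *
    hash_next(struct hashtab *ht)
    {
            struct member *m = ht->curmember;

            if (m == NULL) {
                    /* Chain exhausted: scan for the next non-empty bucket. */
                    unsigned bucket = ht->bucketnum + 1;

                    while (bucket < ht->size &&
                        (m = ht->table[bucket]) == NULL)
                            bucket++;
                    if (bucket >= ht->size)
                            return NULL;            /* ran out of buckets */
                    ht->bucketnum = bucket;
            }
            ht->curmember = m->next;
            return m->data;
    }
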
  /src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/
amdgpu_bo_list.c 204 /* This is based on the bucket sort with O(n) time complexity.
205 * An item with priority "i" is added to bucket[i]. The lists are then
208 struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS]; local in function:amdgpu_bo_list_get_list
213 INIT_LIST_HEAD(&bucket[i]);
225 list_add_tail(&e->tv.head, &bucket[priority]);
232 list_splice(&bucket[i], validated);
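Both DRM hits in this result set use the idea spelled out in the comment above: because priorities are small bounded integers, items can be appended to bucket[priority] and the buckets then concatenated in order, which yields a stable O(n) sort. A self-contained sketch with plain singly linked lists standing in for the kernel's struct list_head:

    #include <stddef.h>

    #define NUM_BUCKETS 4                   /* one list per possible priority */

    struct item {
            unsigned priority;              /* 0 .. NUM_BUCKETS - 1 */
            struct item *next;
    };

    static struct item *
    bucket_sort(struct item *head)
    {
            struct item *bhead[NUM_BUCKETS] = { NULL };
            struct item *btail[NUM_BUCKETS] = { NULL };
            struct item *sorted = NULL, **tailp = &sorted;
            unsigned i;

            /* Distribute: append each item to its priority bucket (stable). */
            while (head != NULL) {
                    struct item *it = head;
                    unsigned p = it->priority;

                    head = it->next;
                    it->next = NULL;
                    if (btail[p] != NULL)
                            btail[p]->next = it;
                    else
                            bhead[p] = it;
                    btail[p] = it;
            }

            /* Concatenate the buckets in priority order. */
            for (i = 0; i < NUM_BUCKETS; i++) {
                    if (bhead[i] == NULL)
                            continue;
                    *tailp = bhead[i];
                    tailp = &btail[i]->next;
            }
            return sorted;
    }

In the driver itself, list_splice() plays the role of the concatenation loop at the end.
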
  /src/libexec/ld.elf_so/
symbol.c 333 Elf32_Word bucket; local in function:_rtld_symlook_obj_gnu
349 bucket = obj->buckets_gnu[fast_remainder32(hash, obj->nbuckets_gnu,
351 if (bucket == 0)
354 hashval = &obj->chains_gnu[bucket];
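In _rtld_symlook_obj_gnu() the bucket index comes from the DT_GNU_HASH table: the symbol name is hashed, the hash is reduced modulo nbuckets_gnu (via fast_remainder32() here), bucket 0 is reserved to mean "no symbols hash here", and otherwise chains_gnu[bucket] starts a chain that is scanned until an entry whose low bit is set. The string hash itself is the well-known dl_new_hash function:

    #include <stdint.h>

    /* DT_GNU_HASH string hash: h = h * 33 + c, seeded with 5381. */
    static uint32_t
    gnu_hash(const char *name)
    {
            uint32_t h = 5381;
            const unsigned char *p;

            for (p = (const unsigned char *)name; *p != '\0'; p++)
                    h = h * 33 + *p;
            return h;
    }
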
xmalloc.c 117 u_char ovu_index; /* bucket # */
150 static size_t pagebucket; /* page size bucket */
179 size_t bucket; local in function:imalloc
200 bucket = 0;
204 bucket++;
206 pagebucket = bucket;
217 bucket = 1;
219 amt = sizeof(union overhead); /* size of first bucket */
220 bucket = 0;
225 bucket = pagebucket;
    [all...]
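xmalloc.c in ld.elf_so is a trimmed copy of the historic BSD power-of-two allocator: bucket i serves blocks whose size doubles with each bucket, and the loop around lines 200-204 walks bucket upward until the block can hold the request plus its overhead header, caching the page-sized bucket in pagebucket. A sketch of that size-to-bucket calculation; the first-bucket size and the overhead handling are assumptions, not the exact xmalloc constants:

    #include <stddef.h>

    #define FIRST_BUCKET_SIZE 8             /* bucket 0 block size, assumed */

    static int
    size_to_bucket(size_t nbytes, size_t overhead)
    {
            size_t amt = FIRST_BUCKET_SIZE;
            int bucket = 0;

            /* Each bucket doubles the block size of the previous one. */
            while (nbytes + overhead > amt) {
                    amt <<= 1;
                    if (amt == 0)
                            return -1;      /* request too large to represent */
                    bucket++;
            }
            return bucket;
    }
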
  /src/sys/external/bsd/drm2/dist/drm/radeon/
radeon_cs.c 48 /* This is based on the bucket sort with O(n) time complexity.
49 * An item with priority "i" is added to bucket[i]. The lists are then
53 struct list_head bucket[RADEON_CS_NUM_BUCKETS]; member in struct:radeon_cs_buckets
61 INIT_LIST_HEAD(&b->bucket[i]);
72 list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
82 list_splice(&b->bucket[i], out_list);
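radeon_cs.c applies the same O(n) bucket sort; the one wrinkle visible at line 72 is that the priority is clamped with min(priority, RADEON_CS_MAX_PRIORITY), so an out-of-range priority simply lands in the last bucket instead of indexing past the array. A minimal standalone equivalent (the constants and their relationship are illustrative, not the driver's):

    #define CS_NUM_BUCKETS  5
    #define CS_MAX_PRIORITY (CS_NUM_BUCKETS - 1)    /* assumed relationship */

    static unsigned
    cs_bucket_index(unsigned priority)
    {
            /* Out-of-range priorities collapse into the last bucket. */
            return priority < CS_MAX_PRIORITY ? priority : CS_MAX_PRIORITY;
    }
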
  /src/sys/net/npf/
lpm.c 88 lpm_ent_t ** bucket; member in struct:__anonb46cf3be0108
115 KASSERT(!hmap->bucket);
119 lpm_ent_t *entry = hmap->bucket[i];
133 kmem_free(hmap->bucket, hmap->hashsize * sizeof(lpm_ent_t *));
134 hmap->bucket = NULL;
172 lpm_ent_t **bucket; local in function:hashmap_rehash
178 bucket = kmem_zalloc(hashsize * sizeof(lpm_ent_t *), flags);
179 if (bucket == NULL)
182 lpm_ent_t *list = hmap->bucket[n];
190 entry->next = bucket[i];
    [all...]
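The npf lpm.c hits show a hash map being rebuilt in hashmap_rehash(): a larger zeroed bucket array is allocated, every entry is unlinked from its old chain and pushed onto the chain it hashes to in the new array, and the old array is freed. A user-space sketch of the same pattern, with calloc()/free() standing in for kmem_zalloc()/kmem_free() and a simplified entry layout and hash:

    #include <stdlib.h>

    struct ent {
            struct ent *next;
            unsigned key;
    };

    struct hmap {
            struct ent **bucket;
            size_t hashsize;
    };

    static int
    hashmap_rehash(struct hmap *hmap, size_t newsize)
    {
            struct ent **bucket = calloc(newsize, sizeof(*bucket));
            size_t n;

            if (bucket == NULL)
                    return -1;

            for (n = 0; n < hmap->hashsize; n++) {
                    struct ent *list = hmap->bucket[n];

                    while (list != NULL) {
                            struct ent *entry = list;
                            size_t i = entry->key % newsize;    /* assumed hash */

                            list = entry->next;
                            entry->next = bucket[i];    /* push onto new chain */
                            bucket[i] = entry;
                    }
            }
            free(hmap->bucket);
            hmap->bucket = bucket;
            hmap->hashsize = newsize;
            return 0;
    }
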
  /src/lib/libbsdmalloc/
malloc.c 85 u_char ovu_index; /* bucket # */
117 static int pagebucket; /* page size bucket */
181 int bucket; local in function:malloc
204 bucket = 0;
208 bucket++;
210 pagebucket = bucket;
219 amt = 8; /* size of first bucket */
220 bucket = 0;
222 amt = 16; /* size of first bucket */
223 bucket = 1;
    [all...]
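libbsdmalloc's malloc.c is the other copy of the same power-of-two allocator; the ovu_index field noted at line 85 records the bucket number in a small union overhead header placed immediately before the pointer returned to the caller, so free() can find the right bucket without searching. A simplified illustration of that header trick, with the union cut down to the fields relevant to bucket lookup:

    #include <stdint.h>

    union overhead {
            union overhead *ov_next;        /* used while the block is free */
            struct {
                    uint8_t ovu_magic;      /* magic number for sanity checks */
                    uint8_t ovu_index;      /* bucket # this block belongs to */
            } ovu;
    };

    /* Step back over the header that precedes a returned pointer to find
     * which bucket the block came from. */
    static int
    block_bucket(void *cp)
    {
            union overhead *op = (union overhead *)cp - 1;

            return op->ovu.ovu_index;
    }
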
  /src/lib/libc/db/hash/
hash.c 165 * maximum bucket number, so the number of buckets is
216 "BUCKET SIZE ", hashp->BSIZE,
217 "BUCKET SHIFT ", hashp->BSHIFT,
222 "MAX BUCKET ", hashp->MAX_BUCKET,
296 /* Fix bucket size to be optimal for file system */
596 /* Pin the bucket chain */
725 uint32_t bucket; local in function:hash_seq
747 for (bucket = hashp->cbucket;
748 bucket <= (uint32_t)hashp->MAX_BUCKET;
749 bucket++)
899 int n, bucket; local in function:__call_hash
    [all...]
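The libc hash(3) backend grows by linear hashing, so __call_hash() cannot simply mask with a single power of two: it masks with the larger mask first and, if the result is beyond MAX_BUCKET (the highest bucket that currently exists), falls back to the smaller mask, which lets the table grow one bucket at a time. A sketch of that calculation using generic parameter names rather than the hashp macros:

    #include <stdint.h>

    static uint32_t
    call_hash(uint32_t hash, uint32_t max_bucket,
        uint32_t high_mask, uint32_t low_mask)
    {
            uint32_t bucket = hash & high_mask;

            /* Beyond the last bucket that exists yet?  Use the smaller mask. */
            if (bucket > max_bucket)
                    bucket &= low_mask;
            return bucket;
    }
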
  /src/sys/kern/
uipc_sem.c 333 u_long bucket = KSEM_PSHARED_HASH(id); local in function:ksem_lookup_pshared_locked
338 LIST_FOREACH(ksem, &ksem_pshared_hashtab[bucket], ks_entry) {
387 u_long bucket = KSEM_PSHARED_HASH(ksem->ks_pshared_id); local in function:ksem_alloc_pshared_id
388 LIST_INSERT_HEAD(&ksem_pshared_hashtab[bucket], ksem, ks_entry);
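kern/uipc_sem.c keeps pshared semaphores in a hash table of <sys/queue.h> LIST chains: KSEM_PSHARED_HASH() selects a bucket, lookups walk the chain with LIST_FOREACH, and new entries are pushed with LIST_INSERT_HEAD. A compact user-space sketch with an assumed table size, hash, and id type:

    #include <sys/queue.h>
    #include <stddef.h>
    #include <stdint.h>

    #define KSEM_HASH_SIZE  32                              /* assumed size */
    #define KSEM_HASH(id)   ((id) & (KSEM_HASH_SIZE - 1))   /* assumed hash */

    struct ksem {
            LIST_ENTRY(ksem) ks_entry;
            uint32_t ks_pshared_id;
    };

    static LIST_HEAD(, ksem) ksem_hashtab[KSEM_HASH_SIZE];

    static struct ksem *
    ksem_lookup_pshared(uint32_t id)
    {
            struct ksem *ks;

            LIST_FOREACH(ks, &ksem_hashtab[KSEM_HASH(id)], ks_entry) {
                    if (ks->ks_pshared_id == id)
                            return ks;
            }
            return NULL;
    }
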
  /src/usr.sbin/lockstat/
main.c 73 SLIST_HEAD(bucket, lockstruct);
74 typedef struct bucket bucket_t;
78 SLIST_ENTRY(lockstruct) bucket;
134 static bucket_t bucket[256]; variable in typeref:typename:bucket_t[256]
136 #define HASH(a) (&bucket[((a) >> 6) & (__arraycount(bucket) - 1)])
621 for (i = 0; i < __arraycount(bucket); i++) {
622 SLIST_INIT(&bucket[i]);
638 SLIST_FOREACH(l, bp, bucket) {
655 SLIST_INSERT_HEAD(bp, l, bucket);
    [all...]
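lockstat(8) hashes lock addresses into 256 SLIST buckets with HASH(a) = &bucket[((a) >> 6) & (__arraycount(bucket) - 1)]: the shift discards low-order bits that carry little information (lock structures tend to share them through alignment; that reading is an inference, not something stated in the source), and the mask works because the table size is a power of two. The index computation on its own:

    #include <stdint.h>

    #define NBUCKET 256                     /* must stay a power of two */

    static unsigned
    lock_hash(uintptr_t addr)
    {
            return (unsigned)((addr >> 6) & (NBUCKET - 1));
    }
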
  /src/sys/arch/mips/rmi/
rmixl_fmn.c 58 * index CPU-dependent table by (global) bucket ID to obtain logical Station ID
526 * initialize bucket sizes and (minimum) credits for non-core stations to ZERO
537 for (u_int bucket=0; bucket < buckets_max; bucket++) { local in function:rmixl_fmn_config_noncore
550 * - initialize bucket sizes and (minimum) credits for a core
715 rmixl_fmn_intr_poll(u_int bucket, rmixl_fmn_rxmsg_t *rxmsg)
717 uint32_t bit = 1 << bucket;
720 KASSERT(bucket < 8);
726 if (rmixl_fmn_msg_recv(bucket, rxmsg) == 0
753 for (u_int bucket=0; bucket < 8; bucket++) { local in function:rmixl_fmn_intr_dispatch
    [all...]
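In rmixl_fmn.c each fast-messaging-network station has at most eight receive buckets, and both the poll and dispatch paths turn the bucket number into a bit mask (1 << bucket) to test against a status word before calling rmixl_fmn_msg_recv(). A stripped-down sketch of that test; the meaning of a set bit (message pending) is an assumption here, and the status word is passed in rather than read from hardware:

    #include <stdbool.h>
    #include <stdint.h>

    static bool
    fmn_bucket_ready(unsigned bucket, uint32_t status)
    {
            if (bucket >= 8)                /* at most 8 buckets per station */
                    return false;
            /* Assumption: a set bit means a message is pending in that bucket. */
            return (status & (1u << bucket)) != 0;
    }
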
  /src/sys/external/bsd/compiler_rt/dist/lib/interception/
interception_win.cc 350 for (size_t bucket = 0; bucket < kMaxTrampolineRegion; ++bucket) { local in function:__interception::TestOnlyReleaseTrampolineRegions
351 TrampolineMemoryRegion *current = &TrampolineRegions[bucket];
362 for (size_t bucket = 0; bucket < kMaxTrampolineRegion; ++bucket) { local in function:__interception::AllocateMemoryForTrampoline
363 TrampolineMemoryRegion* current = &TrampolineRegions[bucket];
  /src/sys/uvm/
uvm_page.c 160 * freelist locks - one per bucket.
392 /* We always start with only 1 bucket. */
798 * bucket for the page. For NUMA
864 * uvm_page_rebucket: Determine a bucket structure and redim the free
917 * Now tell each CPU which bucket to use. In the outer loop, scroll
926 * and assign the same bucket ID.
938 "%d package(s) per bucket\n", 1 << shift);
998 * specific freelist and specific bucket only.
1013 * Skip the bucket if empty, no lock needed. There could be many
1021 /* Skip bucket if low on memory. */
1417 int bucket, s; local in function:uvm_pagefree
    [all...]
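uvm_page.c ties free-page buckets to CPU topology: uvm_page_rebucket() starts from a single bucket, then assigns bucket IDs per CPU package, with a shift that decides how many packages share one bucket (the "%d package(s) per bucket" message prints 1 << shift). A sketch of that assignment rule; the constant and the clamp below are placeholders, not the real topology code:

    #define NFREE_BUCKETS 8                 /* illustrative maximum */

    static unsigned
    package_to_bucket(unsigned package_id, unsigned shift)
    {
            /* shift == 0: one bucket per package; each extra shift doubles
             * the number of packages that share a bucket. */
            return (package_id >> shift) % NFREE_BUCKETS;   /* sketch-only clamp */
    }
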
uvm_pdpolicy_clockpro.c 351 struct bucket { struct
359 static struct bucket static_bucket;
360 static struct bucket *buckets = &static_bucket;
406 struct bucket *newbuckets;
407 struct bucket *oldbuckets;
419 struct bucket *b = &newbuckets[i];
439 static struct bucket *
449 nonresident_rotate(struct bucket *b)
480 struct bucket *b = nonresident_getbucket(obj, idx);
546 struct bucket *b = nonresident_getbucket(obj, idx);
    [all...]
