/src/sys/external/bsd/drm2/dist/drm/amd/amdkfd/
soc15_int.h
    37 #define SOC15_CLIENT_ID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) & 0xff)
    38 #define SOC15_SOURCE_ID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 8 & 0xff)
    39 #define SOC15_RING_ID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 16 & 0xff)
    40 #define SOC15_VMID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 24 & 0xf)
    41 #define SOC15_VMID_TYPE_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 31 & 0x1)
    [all...]
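
These macros unpack the first 32-bit word of an SOC15 interrupt-ring entry into its bit fields. A minimal standalone sketch of the same decoding, assuming a little-endian host (the kernel macros go through le32_to_cpu()); the function name is local to this example:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode dword 0 of an SOC15 IH ring entry, mirroring the macros above. */
    static void
    decode_ih_dword0(const uint32_t *entry)
    {
        uint32_t w = entry[0];          /* le32_to_cpu() elided: LE host assumed */

        printf("client=%u source=%u ring=%u vmid=%u vmid_type=%u\n",
            w & 0xff,                   /* bits  0..7:  client id */
            (w >> 8) & 0xff,            /* bits  8..15: source id */
            (w >> 16) & 0xff,           /* bits 16..23: ring id */
            (w >> 24) & 0xf,            /* bits 24..27: vmid */
            (w >> 31) & 0x1);           /* bit  31:     vmid type */
    }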
/src/lib/libc/stdlib/
cxa_thread_atexit.c
    53     struct cxa_dtor *entry;    local in function:__cxa_thread_run_atexit
    55     while ((entry = SLIST_FIRST(&cxa_dtors)) != NULL) {
    57         (*entry->dtor)(entry->obj);
    58         if (entry->dso_symbol)
    59             __dl_cxa_refcount(entry->dso_symbol, -1);
    60         free(entry);
    74     struct cxa_dtor *entry;    local in function:__cxa_thread_atexit_impl
    78     entry = malloc(sizeof(*entry));
    [all...]
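
The excerpt shows the two halves of C++ thread-local destructor support: __cxa_thread_atexit_impl() allocates a record onto a per-thread SLIST, and __cxa_thread_run_atexit() pops and runs each record at thread exit. A simplified model of that pair, assuming a "link" field name and omitting the DSO reference counting (__dl_cxa_refcount) the real code performs:

    #include <sys/queue.h>
    #include <stdlib.h>

    struct cxa_dtor {
        void (*dtor)(void *);           /* destructor to run at thread exit */
        void *obj;                      /* its argument */
        SLIST_ENTRY(cxa_dtor) link;     /* field name assumed for this sketch */
    };

    static SLIST_HEAD(, cxa_dtor) cxa_dtors = SLIST_HEAD_INITIALIZER(cxa_dtors);

    /* Registration half: one record per registered destructor. */
    static int
    register_thread_dtor(void (*dtor)(void *), void *obj)
    {
        struct cxa_dtor *entry = malloc(sizeof(*entry));

        if (entry == NULL)
            return -1;
        entry->dtor = dtor;
        entry->obj = obj;
        SLIST_INSERT_HEAD(&cxa_dtors, entry, link);
        return 0;
    }

    /* Teardown half: drain the list, running each destructor. */
    static void
    run_thread_dtors(void)
    {
        struct cxa_dtor *entry;

        while ((entry = SLIST_FIRST(&cxa_dtors)) != NULL) {
            SLIST_REMOVE_HEAD(&cxa_dtors, link);
            (*entry->dtor)(entry->obj);
            free(entry);
        }
    }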
/src/sys/external/bsd/drm2/dist/drm/
drm_scatter.c
    60 static void drm_sg_cleanup(struct drm_sg_mem * entry)
    65     for (i = 0; i < entry->pages; i++) {
    66         page = entry->pagelist[i];
    71     vfree(entry->virtual);
    73     kfree(entry->busaddr);
    74     kfree(entry->pagelist);
    75     kfree(entry);
    96     struct drm_sg_mem *entry;    local in function:drm_legacy_sg_alloc
    110     entry = kzalloc(sizeof(*entry), GFP_KERNEL);
    212     struct drm_sg_mem *entry;    local in function:drm_legacy_sg_free
    [all...]
/src/sys/external/bsd/drm/dist/bsd-core/
drm_linux_list.h
    62 list_add_tail(struct list_head *entry, struct list_head *head) {
    63     (entry)->prev = (head)->prev;
    64     (entry)->next = head;
    65     (head)->prev->next = entry;
    66     (head)->prev = entry;
    70 list_del(struct list_head *entry) {
    71     (entry)->next->prev = (entry)->prev;
    72     (entry)->prev->next = (entry)->next;
    [all...]
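
list_add_tail() and list_del() above implement the Linux-style circular doubly-linked list: the head is a sentinel node that points at itself when the list is empty, so insertion and removal never need NULL checks. A self-contained sketch of the same structure with a short usage trace:

    #include <assert.h>

    /* Circular doubly-linked list with a sentinel head, as in
     * drm_linux_list.h (reimplemented here so the sketch compiles alone). */
    struct list_head {
        struct list_head *next, *prev;
    };

    static void
    list_init(struct list_head *head)
    {
        head->next = head->prev = head;     /* empty: head points at itself */
    }

    static void
    list_add_tail(struct list_head *entry, struct list_head *head)
    {
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
    }

    static void
    list_del(struct list_head *entry)
    {
        entry->next->prev = entry->prev;
        entry->prev->next = entry->next;
    }

    int
    main(void)
    {
        struct list_head queue, a, b;

        list_init(&queue);
        list_add_tail(&a, &queue);      /* queue: a */
        list_add_tail(&b, &queue);      /* queue: a, b */
        list_del(&a);                   /* queue: b */
        assert(queue.next == &b && queue.prev == &b);
        return 0;
    }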
drm_scatter.c
    47     struct drm_sg_mem *entry;    local in function:drm_sg_alloc
    58     entry = malloc(sizeof(*entry), DRM_MEM_SGLISTS, M_WAITOK | M_ZERO);
    59     if (!entry)
    65     entry->pages = pages;
    67     entry->busaddr = malloc(pages * sizeof(*entry->busaddr), DRM_MEM_PAGES,
    69     if (!entry->busaddr) {
    70         free(entry, DRM_MEM_SGLISTS);
    78     free(entry->busaddr, DRM_MEM_PAGES);
    222     struct drm_sg_mem *entry = arg;    local in function:drm_sg_alloc_cb
    270     struct drm_sg_mem *entry;    local in function:drm_sg_free
    [all...]
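
drm_sg_alloc() shows the usual staged-allocation discipline: each allocation that fails must free everything allocated before it, in reverse order. A minimal standalone sketch of that unwinding shape (the struct fields and helper name here are this example's, not the driver's):

    #include <stdlib.h>

    struct sg_mem {
        int pages;
        unsigned long *busaddr;
    };

    /* Allocate a descriptor plus its side array; on any failure, unwind
     * whatever was already allocated, mirroring drm_sg_alloc() above. */
    static struct sg_mem *
    sg_alloc(int pages)
    {
        struct sg_mem *entry = calloc(1, sizeof(*entry));

        if (entry == NULL)
            return NULL;
        entry->pages = pages;
        entry->busaddr = calloc(pages, sizeof(*entry->busaddr));
        if (entry->busaddr == NULL) {
            free(entry);                /* unwind the earlier allocation */
            return NULL;
        }
        return entry;
    }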
/src/tests/include/sys/
t_pslist.c
    59         struct pslist_entry entry;    member in struct:ATF_TC_BODY::element
    61         { .i = 0, .entry = PSLIST_ENTRY_INITIALIZER },
    78     PSLIST_ENTRY_DESTROY(&elements[0], entry);
    81         PSLIST_ENTRY_INIT(&elements[i], entry);
    83     PSLIST_WRITER_INSERT_HEAD(&h, &elements[4], entry);
    84     PSLIST_WRITER_INSERT_BEFORE(&elements[4], &elements[2], entry);
    85     PSLIST_WRITER_INSERT_BEFORE(&elements[4], &elements[3], entry);
    86     PSLIST_WRITER_INSERT_BEFORE(&elements[2], &elements[1], entry);
    87     PSLIST_WRITER_INSERT_HEAD(&h, &elements[0], entry);
    88     PSLIST_WRITER_INSERT_AFTER(&elements[4], &elements[5], entry);
    [all...]
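
The insert sequence above assembles the list out of order and is worth tracing: INSERT_HEAD of elements[4] gives [4]; the two INSERT_BEFOREs relative to elements[4] give [2, 3, 4]; INSERT_BEFORE elements[2] gives [1, 2, 3, 4]; INSERT_HEAD of elements[0] gives [0, 1, 2, 3, 4]; and INSERT_AFTER elements[4] appends elements[5], leaving the fully ordered [0, 1, 2, 3, 4, 5] that the rest of the test presumably iterates and checks.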
/src/sys/uvm/
uvm_coredump.c
    83      * entry being processed is deleted (dsl).
    93     struct vm_map_entry *entry;    local in function:uvm_coredump_walkmap
    96     entry = NULL;
    100         if (entry == NULL)
    101             entry = map->header.next;
    102         else if (!uvm_map_lookup_entry(map, state.end, &entry))
    103             entry = entry->next;
    104         if (entry == &map->header)
    108         if (state.end > entry->start)
    [all...]
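
The loop above is deletion-tolerant iteration: because the callback may delete the very entry being processed (per the comment at line 83), the walker never trusts a saved entry->next across the callback. Instead it remembers the end address of the last processed entry and re-finds its position with uvm_map_lookup_entry(). A hedged restatement of that control flow, with this sketch's own comments:

    /* Sketch of the resumption logic in uvm_coredump_walkmap(); state.end
     * is the end address of the last entry handed to the callback. */
    entry = NULL;
    for (;;) {
        if (entry == NULL)
            entry = map->header.next;           /* first iteration */
        else if (!uvm_map_lookup_entry(map, state.end, &entry))
            entry = entry->next;                /* state.end fell in a gap */
        if (entry == &map->header)
            break;                              /* wrapped to the sentinel */
        /* ... process entry, updating state.end ... */
    }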
uvm_map.c
    196  * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
    212  * uvm_map_entry_link: insert entry into a map
    216 #define uvm_map_entry_link(map, after_where, entry) do { \
    217     uvm_mapent_check(entry); \
    219     (entry)->prev = (after_where); \
    220     (entry)->next = (after_where)->next; \
    221     (entry)->prev->next = (entry); \
    222     (entry)->next->prev = (entry); \
    1886     struct vm_map_entry *entry = NULL;    local in function:uvm_map_findspace
    2319     struct vm_map_entry *entry, *first_entry, *next;    local in function:uvm_unmap_remove
    2742     struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,    local in function:uvm_map_extract
    3102     struct vm_map_entry *entry;    local in function:uvm_map_submap
    3164     struct vm_map_entry *current, *entry;    local in function:uvm_map_protect
    3325     struct vm_map_entry *entry, *temp_entry;    local in function:uvm_map_inherit
    3368     struct vm_map_entry *entry, *temp_entry;    local in function:uvm_map_advice
    3417     struct vm_map_entry *entry;    local in function:uvm_map_willneed
    3480     struct vm_map_entry *entry, *start_entry, *failed_entry;    local in function:uvm_map_pageable
    3735     struct vm_map_entry *entry, *failed_entry;    local in function:uvm_map_pageable_all
    3966     struct vm_map_entry *current, *entry;    local in function:uvm_map_clean
    4134     struct vm_map_entry *entry;    local in function:uvm_map_checkprot
    4957     struct vm_map_entry *entry;    local in function:uvm_voaddr_acquire
    5219     struct vm_map_entry *entry;    local in function:uvm_map_printit
    5254     struct vm_map_entry *entry;    local in function:uvm_whatis
    5377     struct vm_map_entry *entry;    local in function:fill_vmentries
    [all...]
/src/sys/external/isc/atheros_hal/dist/ar5416/
ar5416_keycache.c
    36  * Clear the specified key cache entry and any associated MIC entry.
    39 ar5416ResetKeyCacheEntry(struct ath_hal *ah, uint16_t entry)
    43     if (ar5212ResetKeyCacheEntry(ah, entry)) {
    44         ahp->ah_keytype[entry] = keyType[HAL_CIPHER_CLR];
    51  * Sets the contents of the specified key cache entry
    52  * and any associated MIC entry.
    55 ar5416SetKeyCacheEntry(struct ath_hal *ah, uint16_t entry,
    61     if (ar5212SetKeyCacheEntry(ah, entry, k, mac, xorKey)) {
    62         ahp->ah_keytype[entry] = keyType[k->kv_type];
    [all...]
/src/sys/external/bsd/drm2/dist/drm/amd/powerplay/hwmgr/
amdgpu_common_baco.c
    88     const struct baco_cmd_entry *entry,
    94         if ((entry[i].cmd == CMD_WRITE) ||
    95             (entry[i].cmd == CMD_READMODIFYWRITE) ||
    96             (entry[i].cmd == CMD_WAITFOR))
    97             reg = entry[i].reg_offset;
    98         if (!baco_cmd_handler(hwmgr, entry[i].cmd, reg, entry[i].mask,
    99             entry[i].shift, entry[i].val, entry[i].timeout))
    [all...]
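
This is a table-driven register sequencer: BACO entry/exit is described as an array of (command, register, mask, shift, value, timeout) records, and one loop dispatches them all through a common handler. A sketch of the whole walker reassembled from the fragments above (the surrounding function name and return convention are assumptions; the field and handler names come from the excerpt):

    /* Reassembled sketch of the loop excerpted above; returns false as
     * soon as any step of the BACO command table fails. */
    static bool
    run_baco_cmd_table(struct pp_hwmgr *hwmgr,
        const struct baco_cmd_entry *entry, int count)
    {
        u32 reg = 0;
        int i;

        for (i = 0; i < count; i++) {
            /* Only these ops name a register; others reuse the last one. */
            if ((entry[i].cmd == CMD_WRITE) ||
                (entry[i].cmd == CMD_READMODIFYWRITE) ||
                (entry[i].cmd == CMD_WAITFOR))
                reg = entry[i].reg_offset;
            if (!baco_cmd_handler(hwmgr, entry[i].cmd, reg, entry[i].mask,
                entry[i].shift, entry[i].val, entry[i].timeout))
                return false;
        }
        return true;
    }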
/src/lib/libc/posix1e/
acl_from_text_nfs4.c
    55  * Parse the tag field of ACL entry passed as "str". If qualifier
    60 parse_tag(const char *str, acl_entry_t entry, int *need_qualifier)
    67         return (acl_set_tag_type(entry, ACL_USER_OBJ));
    69         return (acl_set_tag_type(entry, ACL_GROUP_OBJ));
    71         return (acl_set_tag_type(entry, ACL_EVERYONE));
    76         return (acl_set_tag_type(entry, ACL_USER));
    78         return (acl_set_tag_type(entry, ACL_GROUP));
    86  * Parse the qualifier field of ACL entry passed as "str".
    92 parse_qualifier(char *str, acl_entry_t entry, int *need_qualifier)
    109     error = acl_get_tag_type(entry, &tag);
    212     acl_entry_t entry;    local in function:_nfs4_acl_entry_from_text
    [all...]
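
parse_tag() maps the leading keyword of an NFSv4 ACL text entry to a tag constant, flagging whether a qualifier (a user or group name) must follow. The keyword strings themselves are elided from the excerpt; the sketch below assumes the conventional NFSv4 spellings ("owner@", "group@", "everyone@", "user", "group") and the tag constants from <sys/acl.h>, so treat the keywords as illustrative:

    #include <string.h>
    #include <sys/acl.h>

    /* Keyword dispatch in the shape of parse_tag(); returns the tag
     * constant, or -1 for an unknown keyword. Keyword spellings are
     * assumptions; the tag constants are the ones the excerpt returns. */
    static int
    tag_for_keyword(const char *str, int *need_qualifier)
    {
        *need_qualifier = 0;
        if (strcmp(str, "owner@") == 0)
            return ACL_USER_OBJ;
        if (strcmp(str, "group@") == 0)
            return ACL_GROUP_OBJ;
        if (strcmp(str, "everyone@") == 0)
            return ACL_EVERYONE;
        *need_qualifier = 1;            /* named user/group: qualifier required */
        if (strcmp(str, "user") == 0)
            return ACL_USER;
        if (strcmp(str, "group") == 0)
            return ACL_GROUP;
        return -1;
    }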
/src/sys/miscfs/procfs/
procfs_map.c
    114     struct vm_map_entry *entry;    local in function:procfs_domap
    145     for (entry = map->header.next; entry != &map->header;
    146         entry = entry->next) {
    148         if (UVM_ET_ISSUBMAP(entry))
    155         if (UVM_ET_ISOBJ(entry) &&
    156             UVM_OBJ_IS_VNODE(entry->object.uvm_obj)) {
    157             vp = (struct vnode *)entry->object.uvm_obj;
    171             width, entry->start
    [all...]
/src/sys/external/bsd/drm2/dist/drm/amd/powerplay/smumgr/
amdgpu_vega12_smumgr.c
    54     PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
    56     PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
    60         upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
    64         lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
    76     memcpy(table, priv->smu_tables.entry[table_id].table,
    77         priv->smu_tables.entry[table_id].size);
    96     PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
    98     PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
    101     memcpy(priv->smu_tables.entry[table_id].table, table,
    102         priv->smu_tables.entry[table_id].size);
    [all...]
amdgpu_vega10_smumgr.c
    51     PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
    53     PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
    57         upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
    60         lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
    63         priv->smu_tables.entry[table_id].table_id);
    68     memcpy(table, priv->smu_tables.entry[table_id].table,
    69         priv->smu_tables.entry[table_id].size);
    88     PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
    90     PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
    93     memcpy(priv->smu_tables.entry[table_id].table, table,
    [all...]
amdgpu_vega20_smumgr.c
    176     PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
    178     PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
    183         upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
    188         lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
    199     memcpy(table, priv->smu_tables.entry[table_id].table,
    200         priv->smu_tables.entry[table_id].size);
    220     PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
    222     PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
    225     memcpy(priv->smu_tables.entry[table_id].table, table,
    226         priv->smu_tables.entry[table_id].size);
    [all...]
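
All three vega smumgr excerpts repeat one pattern: validate the cached table descriptor (version and size non-zero), hand the SMU the DMA address of the shared buffer as two 32-bit halves, ask it to transfer the table, then memcpy between the buffer's CPU mapping and the caller. A generic sketch of that round trip; the message IDs and smu_send() helper are stand-ins, not the driver's names:

    #include <stdint.h>
    #include <string.h>
    #include <errno.h>

    enum smu_msg {                       /* hypothetical message ids */
        MSG_SET_DRAM_ADDR_HIGH,
        MSG_SET_DRAM_ADDR_LOW,
        MSG_TRANSFER_TABLE_SMU2DRAM,
    };

    struct smu_table_entry {
        uint32_t version;
        uint32_t size;
        uint64_t mc_addr;                /* bus address the SMU DMAs to */
        void *table;                     /* CPU mapping of the same buffer */
    };

    static int
    copy_table_from_smu(struct smu_table_entry *entry, void *dst, int table_id,
        int (*smu_send)(enum smu_msg, uint32_t))
    {
        if (entry->version == 0 || entry->size == 0)
            return -EINVAL;              /* descriptor never populated */

        /* Split the 64-bit DMA address into two 32-bit message arguments. */
        smu_send(MSG_SET_DRAM_ADDR_HIGH, (uint32_t)(entry->mc_addr >> 32));
        smu_send(MSG_SET_DRAM_ADDR_LOW, (uint32_t)entry->mc_addr);
        smu_send(MSG_TRANSFER_TABLE_SMU2DRAM, table_id);

        memcpy(dst, entry->table, entry->size);  /* SMU wrote the buffer */
        return 0;
    }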
/src/sys/arch/bebox/stand/boot/
cpu.c
    84 runCPU1(void *entry)
    99     PEF_vector[0] = (long)entry;
    100     PEF_vector[1] = (long)entry;
    101     PEF_vector[2] = (long)entry;
    102     PEF_vector[3] = (long)entry;
    103     PEF_vector[0] = (long)entry;
    104     PEF_vector2[0] = (long)entry;
/src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/subdev/bios/
nouveau_nvkm_subdev_bios_bit.c
    37     u32 entry = bios->bit_offset + 12;    local in function:bit_entry
    39         if (nvbios_rd08(bios, entry + 0) == id) {
    40             bit->id = nvbios_rd08(bios, entry + 0);
    41             bit->version = nvbios_rd08(bios, entry + 1);
    42             bit->length = nvbios_rd16(bios, entry + 2);
    43             bit->offset = nvbios_rd16(bios, entry + 4);
    47         entry += nvbios_rd08(bios, bios->bit_offset + 9);
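
bit_entry() scans the BIT (BIOS Information Table): entries start 12 bytes past the table header, each entry packs id/version/length/offset as 1+1+2+2 bytes, and the per-entry stride is itself read from header byte 9. A hedged reassembly of the loop from those fragments; the entry-count handling is elided from the excerpt, so the loop bound here is an assumption:

    /* Sketch of the BIT scan; nvbios_rd08/rd16 read 8/16-bit values at a
     * byte offset into the BIOS image. */
    u32 entry = bios->bit_offset + 12;          /* first entry after header */
    u8 stride = nvbios_rd08(bios, bios->bit_offset + 9);

    for (i = 0; i < entries; i++, entry += stride) {    /* 'entries' assumed */
        if (nvbios_rd08(bios, entry + 0) == id) {
            bit->id      = nvbios_rd08(bios, entry + 0);
            bit->version = nvbios_rd08(bios, entry + 1);
            bit->length  = nvbios_rd16(bios, entry + 2);
            bit->offset  = nvbios_rd16(bios, entry + 4);
            return 0;                           /* found */
        }
    }
    return -ENOENT;                             /* table id not present */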
/src/sys/external/isc/atheros_hal/dist/ar5211/
ar5211_keycache.c
    44  * Return true if the specific key cache entry is valid.
    47 ar5211IsKeyCacheEntryValid(struct ath_hal *ah, uint16_t entry)
    49     if (entry < AR_KEYTABLE_SIZE) {
    50         uint32_t val = OS_REG_READ(ah, AR_KEYTABLE_MAC1(entry));
    58  * Clear the specified key cache entry
    61 ar5211ResetKeyCacheEntry(struct ath_hal *ah, uint16_t entry)
    63     if (entry < AR_KEYTABLE_SIZE) {
    64         OS_REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0);
    65         OS_REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0);
    66         OS_REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0);
    [all...]
/src/sys/external/isc/atheros_hal/dist/ar5212/
ar5212_keycache.c
    49  * Return true if the specific key cache entry is valid.
    52 ar5212IsKeyCacheEntryValid(struct ath_hal *ah, uint16_t entry)
    54     if (entry < AH_PRIVATE(ah)->ah_caps.halKeyCacheSize) {
    55         uint32_t val = OS_REG_READ(ah, AR_KEYTABLE_MAC1(entry));
    63  * Clear the specified key cache entry and any associated MIC entry.
    66 ar5212ResetKeyCacheEntry(struct ath_hal *ah, uint16_t entry)
    70     if (entry >= AH_PRIVATE(ah)->ah_caps.halKeyCacheSize) {
    71         HALDEBUG(ah, HAL_DEBUG_ANY, "%s: entry %u out of range\n",
    72             __func__, entry);
    [all...]
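
The ar5210/ar5211/ar5212 key cache routines all follow the same contract: bounds-check the slot index against the key cache size, then zero out each hardware keytable register for that slot. A condensed sketch of that shared shape; the register list is abbreviated (the real routines also clear KEY4, the type word, and the MAC registers):

    /* Shared shape of the *ResetKeyCacheEntry() routines; cache_size is
     * AR_KEYTABLE_SIZE on ar5210/5211 and halKeyCacheSize on ar5212. */
    static HAL_BOOL
    reset_key_cache_entry(struct ath_hal *ah, uint16_t entry, u_int cache_size)
    {
        if (entry >= cache_size)
            return AH_FALSE;                    /* slot index out of range */
        OS_REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0);
        OS_REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0);
        OS_REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0);
        OS_REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0);
        /* ... KEY4, TYPE and MAC registers cleared the same way ... */
        return AH_TRUE;
    }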
/src/sbin/restore/
extern.h
    34 struct entry *addentry(const char *, ino_t, int);
    37 void badentry(struct entry *, const char *);
    47 void delwhiteout(struct entry *);
    55 char *flagvalues(struct entry *);
    56 void freeentry(struct entry *);
    59 char *gentempname(struct entry *);
    66 struct entry *lookupino(ino_t);
    67 struct entry *lookupname(const char *);
    70 void mktempname(struct entry *);
    71 void moveentry(struct entry *, const char *);
    [all...]
/src/sys/external/bsd/drm2/dist/drm/vmwgfx/
vmwgfx_cmdbuf_res.c
    39  * struct vmw_cmdbuf_res - Command buffer managed resource entry.
    42  * @hash: Hash entry for the manager hash table.
    45  * @state: Staging state of this resource entry.
    46  * @man: Pointer to a resource manager for this entry.
    104  * @entry: Pointer to a struct vmw_cmdbuf_res.
    106  * Frees a struct vmw_cmdbuf_res entry and drops its reference to the
    110     struct vmw_cmdbuf_res *entry)
    112     list_del(&entry->head);
    113     WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash));
    114     vmw_resource_unreference(&entry->res);
    130     struct vmw_cmdbuf_res *entry, *next;    local in function:vmw_cmdbuf_res_commit
    167     struct vmw_cmdbuf_res *entry, *next;    local in function:vmw_cmdbuf_res_revert
    253     struct vmw_cmdbuf_res *entry;    local in function:vmw_cmdbuf_res_remove
    325     struct vmw_cmdbuf_res *entry, *next;    local in function:vmw_cmdbuf_res_man_destroy
    [all...]
/src/sys/external/isc/atheros_hal/dist/ar5210/
ar5210_keycache.c
    43 ar5210IsKeyCacheEntryValid(struct ath_hal *ah, uint16_t entry)
    45     if (entry < AR_KEYTABLE_SIZE) {
    46         uint32_t val = OS_REG_READ(ah, AR_KEYTABLE_MAC1(entry));
    54  * Clear the specified key cache entry.
    57 ar5210ResetKeyCacheEntry(struct ath_hal *ah, uint16_t entry)
    59     if (entry < AR_KEYTABLE_SIZE) {
    60         OS_REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0);
    61         OS_REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0);
    62         OS_REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0);
    63         OS_REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0);
    [all...]
/src/sys/external/bsd/drm2/dist/drm/ttm/
ttm_execbuf_util.c
    42     struct ttm_validate_buffer *entry)
    44     list_for_each_entry_continue_reverse(entry, list, head) {
    45         struct ttm_buffer_object *bo = entry->bo;
    54     struct ttm_validate_buffer *entry;    local in function:ttm_eu_backoff_reservation
    60     list_for_each_entry(entry, list, head) {
    61         struct ttm_buffer_object *bo = entry->bo;
    89     struct ttm_validate_buffer *entry;    local in function:ttm_eu_reserve_buffers
    98     list_for_each_entry(entry, list, head) {
    99         struct ttm_buffer_object *bo = entry->bo;
    103         struct ttm_validate_buffer *safe = entry;
    165     struct ttm_validate_buffer *entry;    local in function:ttm_eu_fence_buffer_objects
    [all...]
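
ttm_eu_reserve_buffers() tries to reserve every buffer on an execbuf validation list, and the list_for_each_entry_continue_reverse() walk in the error path releases, in reverse order, only the buffers reserved so far. A self-contained sketch of that all-or-nothing pattern, with plain pthread trylocks standing in for TTM's ww_mutex reservation machinery:

    #include <pthread.h>
    #include <stdbool.h>

    /* Try to acquire every lock in order; on the first failure, back off
     * by releasing the ones already held, in reverse order. */
    static bool
    reserve_all(pthread_mutex_t **locks, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            if (pthread_mutex_trylock(locks[i]) != 0) {
                while (--i >= 0)        /* mirror of the continue_reverse walk */
                    pthread_mutex_unlock(locks[i]);
                return false;           /* caller may wait and retry */
            }
        }
        return true;                    /* all buffers reserved */
    }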
/src/sys/net/npf/
lpm.c
    119     lpm_ent_t *entry = hmap->bucket[i];    local in function:lpm_clear
    121     while (entry) {
    122         lpm_ent_t *next = entry->next;
    125             dtor(arg, entry->key,
    126                 entry->len, entry->val);
    128         kmem_free(entry,
    129             offsetof(lpm_ent_t, key[entry->len]));
    130         entry = next;
    185     lpm_ent_t *entry = list;    local in function:hashmap_rehash
    207     lpm_ent_t *entry;    local in function:hashmap_insert
    239     lpm_ent_t *entry;    local in function:hashmap_lookup
    260     lpm_ent_t *prev = NULL, *entry;    local in function:hashmap_remove
    325     lpm_ent_t *entry;    local in function:lpm_insert
    380     lpm_ent_t *entry;    local in function:lpm_lookup
    403     lpm_ent_t *entry;    local in function:lpm_lookup_prefix
    [all...]
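
Two details of lpm.c's separate-chaining hash map are visible above: entries are freed with offsetof(lpm_ent_t, key[entry->len]) because the key is a variable-length trailing array, and hashmap_remove() walks a bucket with a trailing prev pointer so it can patch the predecessor's next link. A sketch of that removal shape, reusing the lpm_ent_t fields the excerpt shows (next, key, len); lpm_ent_t and kmem_free as in lpm.c:

    #include <string.h>
    #include <stddef.h>

    /* Unlink and free the entry matching (key, len) from one bucket chain;
     * returns true if found. */
    static bool
    bucket_remove(lpm_ent_t **bucket, const void *key, size_t len)
    {
        lpm_ent_t *prev = NULL, *entry = *bucket;

        while (entry) {
            if (entry->len == len && memcmp(entry->key, key, len) == 0) {
                if (prev)
                    prev->next = entry->next;   /* unlink mid-chain */
                else
                    *bucket = entry->next;      /* unlink the head */
                kmem_free(entry, offsetof(lpm_ent_t, key[entry->len]));
                return true;
            }
            prev = entry;
            entry = entry->next;
        }
        return false;
    }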
/src/sys/sys/
pslist.h
    79 pslist_entry_init(struct pslist_entry *entry)
    82     entry->ple_next = NULL;
    83     entry->ple_prevp = NULL;
    87 pslist_entry_destroy(struct pslist_entry *entry)
    90     _PSLIST_ASSERT(entry->ple_prevp == NULL);
    93      * Poison the next entry. If we used NULL here, then readers
    97     atomic_store_relaxed(&entry->ple_next, _PSLIST_POISON);
    104      * Writes to initialize a new entry must precede its publication by
    129 pslist_writer_insert_before(struct pslist_entry *entry,
    133     _PSLIST_ASSERT(entry->ple_next != _PSLIST_POISON);
    [all...]
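
pslist_entry_destroy() poisons ple_next instead of NULLing it: a racing reader that still holds the dead entry would interpret NULL as a normal end-of-list and silently terminate, whereas dereferencing a poison value faults immediately and loudly. A tiny standalone illustration of the idiom; the poison constant here is this sketch's, not _PSLIST_POISON's actual value:

    #include <stdatomic.h>

    struct entry {
        _Atomic(struct entry *) next;
    };

    /* Unmapped, unaligned address: any dereference traps at once. */
    #define POISON ((struct entry *)0xdeadbeef)

    static void
    entry_destroy(struct entry *e)
    {
        /* NULL would read as "end of list" to a concurrent reader;
         * poison turns a use-after-destroy into an immediate crash. */
        atomic_store_explicit(&e->next, POISON, memory_order_relaxed);
    }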