/src/sys/external/bsd/compiler_rt/dist/lib/asan/

asan_poisoning.h
    12 // Shadow memory poisoning by ASan RTL and by user application.
    26 // Poisons the shadow memory for "size" bytes starting from "addr".
    29 // Poisons the shadow memory for "redzone_size" bytes starting from
    46 // for mapping shadow and zeroing out pages doesn't "just work", so we should
    50 // TODO(mcgrathr): Fuchsia doesn't allow the shadow mapping to be
    82 u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr);    local in function:__asan::FastPoisonShadowPartialRightRedzone
    83 for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) {
    85 *shadow = 0;  // fully addressable
    87 *shadow = (SHADOW_GRANULARITY == 128) ? 0xff : value;  // unaddressable
    90 *shadow = poison_partial ? static_cast<u8>(size - i) : 0
    [all...]

asan_premap_shadow.cc
    12 // Reserve shadow memory with an ifunc resolver.
    55 uptr shadow = reinterpret_cast<uptr>(&__asan_shadow);    local in function:__asan::PremapShadowFailed
    57 // shadow == resolver is how Android KitKat and older handles ifunc.
    58 // shadow == 0 just in case.
    59 if (shadow == 0 || shadow == resolver)
    67 // The resolver may be called multiple times. Map the shadow just once.
    74 // the shadow mapping.

asan_debugging.cc
    89 if (auto shadow = descr.AsShadow()) {    local in function:__asan_locate_address
    91 switch (shadow->kind) {
    93 region_kind = "low shadow";
    96 region_kind = "shadow gap";
    99 region_kind = "high shadow";

asan_fake_stack.cc
    31 u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));    local in function:__asan::SetShadow
    35 shadow[i] = magic;

asan_descriptions.h
    90 static const char *const ShadowNames[] = {"low shadow", "shadow gap",
    91 "high shadow"};
    171 // a shadow, global (variable), stack, or heap address.
    193 ShadowAddressDescription shadow;    member in union:__asan::AddressDescription::AddressDescriptionData::__anon1fc955ce010a
    217 return data.shadow.addr;
    233 return data.shadow.Print();
    248 return data.kind == kAddressKindShadow ? &data.shadow : nullptr;

asan_allocator.cc
    198 // Mark the corresponding shadow memory as not needed.
    519 u8 *shadow =    local in function:__asan::Allocator::Allocate
    521 *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
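The asan_poisoning.h and asan_allocator.cc matches above show ASan's shadow encoding: each SHADOW_GRANULARITY-byte granule of application memory maps to one shadow byte that is 0 when fully addressable, a poison magic when fully unaddressable, and the count of addressable bytes when only a prefix is valid. A minimal standalone sketch of that encoding, assuming an 8-byte granule and writing into a plain caller-supplied buffer instead of the real (specially mapped) shadow region:

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative constant; the real value comes from asan_mapping.h. */
    #define SHADOW_GRANULARITY 8u

    /*
     * Fill "shadow" (one byte per granule) for an allocation whose first
     * "size" bytes are addressable and whose remaining right redzone is
     * poisoned with "poison_value" -- the same encoding the
     * FastPoisonShadowPartialRightRedzone loop above writes into the
     * real shadow memory.
     */
    static void
    encode_partial_right_redzone(uint8_t *shadow, size_t size,
        size_t redzone_size, uint8_t poison_value)
    {
        for (size_t i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) {
            if (i + SHADOW_GRANULARITY <= size)
                *shadow = 0;                    /* granule fully addressable */
            else if (i >= size)
                *shadow = poison_value;         /* granule fully unaddressable */
            else
                *shadow = (uint8_t)(size - i);  /* only the first size - i bytes valid */
        }
    }

For size = 13 and redzone_size = 32 this produces the shadow bytes 00 05 fa fa when poison_value is 0xfa, i.e. one fully valid granule, one granule with 5 valid bytes, and two fully poisoned ones.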
/src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/subdev/bios/

nouveau_nvkm_subdev_bios_shadow.c
    35 struct shadow {    struct
    44 shadow_fetch(struct nvkm_bios *bios, struct shadow *mthd, u32 upto)
    57 shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
    115 shadow_method(struct nvkm_bios *bios, struct shadow *mthd, const char *name)
    176 struct shadow mthds[] = {
/src/sys/external/bsd/compiler_rt/dist/lib/asan/tests/

asan_noinst_test.cc
    114 fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
    117 uptr shadow = __asan::MemToShadow(ptr + i);    local in function:PrintShadow
    120 if (shadow != prev_shadow) {
    121 prev_shadow = shadow;
    122 fprintf(stderr, "%02x", (int)*(u8*)shadow);
    224 // Check that __asan_region_is_poisoned works for shadow regions.
/src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/subdev/mxm/

nouveau_nvkm_subdev_mxm_base.c
    233 struct mxm_shadow_h *shadow = _mxm_shadow;    local in function:mxm_shadow
    235 nvkm_debug(&mxm->subdev, "checking %s\n", shadow->name);
    236 if (shadow->exec(mxm, version)) {
    242 } while ((++shadow)->name);
/src/usr.sbin/rpc.pcnfsd/

pcnfsd_misc.c
    49 #include <shadow.h>
    113 ** Check the existence of SHADOW. If it is there, then we are
    117 if (access(SHADOW, 0))
    118 shadowfile = 0;    /* SHADOW is not there */
    124 (void) setspent();    /* Setting the shadow password file */
    144 * may have an 'x' in which case look in /etc/shadow ..
    147 struct spwd *shadow = getspnam(usrnam);    local in function:get_password
    149 if (!shadow)
    151 pswd = shadow->sp_pwdp;
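The pcnfsd_misc.c matches show the usual shadow-password fallback: when the passwd field only holds the placeholder 'x', the real hash is fetched from /etc/shadow with getspnam(). A hedged sketch of that lookup; lookup_password_hash() is an invented name, and pcnfsd's real get_password() wraps the same calls in more checks and only takes this path when a SHADOW file is present:

    #include <pwd.h>
    #include <shadow.h>
    #include <string.h>

    /*
     * Return the password hash for "usrnam", preferring the shadow entry
     * when the passwd field is just "x". Reading /etc/shadow normally
     * requires root privileges.
     */
    static const char *
    lookup_password_hash(const char *usrnam)
    {
        struct passwd *pw = getpwnam(usrnam);

        if (pw == NULL)
            return NULL;
        if (strcmp(pw->pw_passwd, "x") == 0) {
            struct spwd *shadow = getspnam(usrnam);

            if (shadow == NULL)
                return NULL;
            return shadow->sp_pwdp;    /* hash from /etc/shadow */
        }
        return pw->pw_passwd;          /* hash stored directly in passwd */
    }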
/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/

amdgpu_object.h
    104 struct amdgpu_bo *shadow;    member in struct:amdgpu_bo
    287 int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,

amdgpu_device.c
    3843 struct amdgpu_bo *shadow;    local in function:amdgpu_device_recover_vram
    3851 DRM_INFO("recover vram bo from shadow start\n");
    3853 list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
    3856 if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
    3857 shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
    3858 shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
    3861 r = amdgpu_bo_restore_shadow(shadow, &next);
    3887 DRM_ERROR("recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
    3891 DRM_INFO("recover vram bo from shadow done\n");
/src/sys/external/bsd/drm2/dist/drm/i915/gvt/

scheduler.h
    88 bool shadow; /* if workload has done shadow of guest request */    member in struct:intel_vgpu_workload
    113 /* shadow batch buffer */

gvt.h
    154 struct intel_context *shadow[I915_NUM_ENGINES];    member in struct:intel_vgpu_submission
    445 /* Macros for easily accessing vGPU virtual/shadow register.
/src/sys/arch/x68k/stand/libiocs/

iocs.h
    157 short shadow[16];    member in struct:iocs_patst
/src/sys/arch/alpha/include/

logout.h
    192 uint64_t shadow[8];    /* Shadow reg. 8-14, 25 */    member in struct:__anon326cdc600408
/src/sys/dist/pf/net/

pf_table.c
    1519 struct pfr_ktable *kt, *rt, *shadow, key;    local in function:pfr_ina_define
    1564 shadow = pfr_create_ktable(tbl, 0, 0);
    1565 if (shadow == NULL) {
    1575 if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
    1580 if (pfr_route_kentry(shadow, p)) {
    1592 shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
    1594 kt->pfrkt_shadow = shadow;
    1596 pfr_clean_node_mask(shadow, &addrq);
    1597 pfr_destroy_ktable(shadow, 0);
    1607 pfr_destroy_ktable(shadow, 0)
    1694 struct pfr_ktable *shadow = kt->pfrkt_shadow;    local in function:pfr_commit_ktable
    [all...]
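The pf_table.c matches outline pf's staged-commit scheme: pfr_ina_define() builds a detached shadow pfr_ktable and hangs it off kt->pfrkt_shadow while the live table keeps serving lookups, and pfr_commit_ktable() later swaps it into service (or pfr_destroy_ktable() discards it on error). A toy sketch of that shadow-then-commit pattern, with all names invented for illustration:

    #include <stdlib.h>
    #include <string.h>

    /* Toy stand-in for a pfr_ktable: just an array of addresses. */
    struct addr_table {
        int     *addrs;
        size_t   cnt;
    };

    struct table_slot {
        struct addr_table *active;  /* what lookups use right now */
        struct addr_table *shadow;  /* staged replacement, not yet visible */
    };

    /* Build the detached copy, cf. pfr_create_ktable()/pfr_route_kentry(). */
    static int
    stage_shadow(struct table_slot *ts, const int *addrs, size_t cnt)
    {
        struct addr_table *shadow;

        if ((shadow = calloc(1, sizeof(*shadow))) == NULL)
            return (-1);
        if ((shadow->addrs = malloc(cnt * sizeof(*addrs))) == NULL) {
            free(shadow);
            return (-1);
        }
        memcpy(shadow->addrs, addrs, cnt * sizeof(*addrs));
        shadow->cnt = cnt;
        ts->shadow = shadow;        /* cf. kt->pfrkt_shadow = shadow */
        return (0);
    }

    /* Make the staged table live and tear down the old one. */
    static void
    commit_shadow(struct table_slot *ts)
    {
        struct addr_table *old = ts->active;

        ts->active = ts->shadow;    /* swap the staged table in */
        ts->shadow = NULL;
        if (old != NULL) {
            free(old->addrs);       /* cf. pfr_destroy_ktable(old, ...) */
            free(old);
        }
    }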
/src/sys/external/bsd/drm2/dist/drm/

drm_fb_helper.c
    109 * It will automatically set up deferred I/O if the driver requires a shadow
    146 * fbdev shadow buffer and call drm_fbdev_generic_setup() instead.
    434 /* Generic fbdev uses a shadow buffer */
    2044 void *shadow = NULL;    local in function:drm_fbdev_cleanup
    2052 shadow = fbi->screen_buffer;
    2058 vfree(shadow);
    2273 * Drivers that set the dirty callback on their framebuffer will get a shadow
    2275 * make deferred I/O work with all kinds of buffers. A shadow buffer can be
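The drm_fb_helper.c comments refer to the generic fbdev emulation's shadow buffer: fbdev clients draw into a vmalloc'ed copy, and deferred I/O later flushes the touched scanlines into the real framebuffer. A rough sketch of that flush step; shadow_flush_lines() and its parameters are invented for illustration, and the real helper additionally handles damage clipping and the driver's dirty callback:

    #include <stddef.h>
    #include <string.h>

    /*
     * Copy scanlines [first, last] from the shadow buffer into the real
     * framebuffer mapping -- the core of what a deferred-I/O flush does.
     */
    static void
    shadow_flush_lines(char *fb_dst, const char *shadow_src, size_t pitch,
        unsigned int first, unsigned int last)
    {
        size_t off = (size_t)first * pitch;
        size_t len = (size_t)(last - first + 1) * pitch;

        memcpy(fb_dst + off, shadow_src + off, len);
    }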
/src/sys/external/bsd/drm2/dist/drm/qxl/

qxl_drv.h
    93 struct qxl_bo *shadow;    member in struct:qxl_bo
/src/sys/external/bsd/drm2/dist/drm/i915/gem/

i915_gem_execbuffer.c
    221 * copy the user's batchbuffer to a shadow (so that the user doesn't have
    2007 struct i915_vma *shadow;    member in struct:eb_parse_work
    2021 pw->shadow,
    2031 i915_active_release(&pw->shadow->active);
    2042 struct i915_vma *shadow,
    2056 err = i915_active_acquire(&shadow->active);
    2072 pw->shadow = shadow;
    2096 dma_resv_lock(shadow->resv, NULL);
    2097 dma_resv_add_excl_fence(shadow->resv, &pw->base.dma)
    2120 struct i915_vma *shadow, *trampoline;    local in function:eb_parse
    [all...]