    Searched defs:vaddr (Results 1 - 25 of 369) sorted by relevancy


  /src/sys/dev/qbus/
if_uba.c
     80 void *vaddr; local in function:if_ubaminit
     97 if ((error = bus_dmamem_map(uh->uh_dmat, &seg, rseg, totsz, &vaddr,
    143 ifw[i].ifw_vaddr = (char *)vaddr + size * i;
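The if_ubaminit() matches above show the standard NetBSD bus_dma(9) pairing: bus_dmamem_alloc() obtains DMA-safe physical segments, and bus_dmamem_map() maps them into kernel virtual space, yielding the vaddr that is then carved into per-ifw chunks. A minimal sketch of that pairing, with placeholder tag/size arguments rather than the driver's own:

    #include <sys/param.h>
    #include <sys/bus.h>

    /* Sketch: allocate DMA-safe memory and map it to get a kernel vaddr. */
    static int
    map_dma_buffer(bus_dma_tag_t tag, size_t totsz, void **vaddrp)
    {
            bus_dma_segment_t seg;
            int rseg, error;

            /* One physically contiguous segment, page-aligned. */
            error = bus_dmamem_alloc(tag, totsz, PAGE_SIZE, 0, &seg, 1,
                &rseg, BUS_DMA_NOWAIT);
            if (error)
                    return error;

            /* Map the segment into KVA; *vaddrp is the CPU's view of it. */
            error = bus_dmamem_map(tag, &seg, rseg, totsz, vaddrp,
                BUS_DMA_NOWAIT);
            if (error)
                    bus_dmamem_free(tag, &seg, rseg);
            return error;
    }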
  /src/sys/external/bsd/drm2/dist/drm/i915/gem/selftests/
i915_gem_client_blt.c
     27 u32 *vaddr; local in function:__igt_client_fill
     52 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
     53 if (IS_ERR(vaddr)) {
     54 err = PTR_ERR(vaddr);
     68 memset32(vaddr, val ^ 0xdeadbeaf,
     87 if (vaddr[i] != val) {
     88 pr_err("vaddr[%u]=%x, expected=%x\n", i,
     89 vaddr[i], val);
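The i915 hits here (and in intel_huc.c and igt_spinner.c below) all use the Linux error-pointer idiom: i915_gem_object_pin_map() returns either a usable mapping or an errno encoded in the pointer itself, which IS_ERR() detects and PTR_ERR() decodes. A minimal sketch of the idiom with a hypothetical mapping helper:

    #include <linux/err.h>
    #include <linux/types.h>

    struct my_obj;                          /* hypothetical object type */
    void *map_object(struct my_obj *obj);   /* hypothetical; may return ERR_PTR */

    static int
    use_mapping(struct my_obj *obj)
    {
            u32 *vaddr = map_object(obj);

            if (IS_ERR(vaddr))              /* pointer encodes -errno */
                    return PTR_ERR(vaddr);  /* recover the errno value */

            vaddr[0] = 0;                   /* pointer is valid; use it */
            return 0;
    }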
  /src/sys/external/bsd/drm2/dist/drm/i915/gt/
intel_ring_types.h
     30 void *vaddr; member in struct:intel_ring
  /src/sys/external/bsd/drm2/include/drm/
drm_gem_cma_helper.h
     57 void *vaddr; member in struct:drm_gem_cma_object
  /src/sys/arch/arc/arc/
bus_space_sparse.c
     92 vaddr_t vaddr = uvm_km_alloc(kernel_map, (vsize_t)(end - start), local in function:arc_sparse_bus_space_compose_handle
     95 if (vaddr == 0)
     99 for (vaddr_t va = vaddr; start < end;
    105 vaddr += (offset & PGOFSET);
    106 *bshp = vaddr;
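arc_sparse_bus_space_compose_handle() reserves kernel VA with uvm_km_alloc(), maps the region page by page, and folds the sub-page offset (offset & PGOFSET) into the returned handle. A rough sketch of that shape, assuming pmap_kenter_pa() is used for the per-page mappings (the actual arc code may differ) and with illustrative flags:

    #include <sys/param.h>
    #include <sys/errno.h>
    #include <uvm/uvm_extern.h>

    static int
    compose_handle(paddr_t start, paddr_t end, paddr_t offset, vaddr_t *bshp)
    {
            vaddr_t vaddr = uvm_km_alloc(kernel_map, (vsize_t)(end - start),
                0, UVM_KMF_VAONLY | UVM_KMF_NOWAIT);

            if (vaddr == 0)
                    return ENOMEM;

            /* Enter one mapping per page of the target range. */
            for (vaddr_t va = vaddr; start < end;
                va += PAGE_SIZE, start += PAGE_SIZE)
                    pmap_kenter_pa(va, start,
                        VM_PROT_READ | VM_PROT_WRITE, 0);
            pmap_update(pmap_kernel());

            /* The handle keeps the byte offset within the first page. */
            *bshp = vaddr + (offset & PGOFSET);
            return 0;
    }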
  /src/sys/arch/xen/include/
xenio_gntdev.h
     83 * to @vaddr. This can be used to perform a munmap(), followed by an
     86 * @vaddr is returned in @count.
     89 * supplied @vaddr must correspond to the start of the range; otherwise
     98 uint64_t vaddr; member in struct:ioctl_gntdev_get_offset_for_vaddr
    102 /* The number of pages mapped in the VM area that begins at @vaddr. */
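The comment fragments describe a get-offset-for-vaddr ioctl: given the start vaddr of a gntdev mapping, it returns the mmap offset and page count needed to munmap() and later re-map the same grant range. A hedged userland sketch; the ioctl name (derived from the struct name), header path, and the types of the offset/count fields are assumptions based on the excerpt:

    #include <sys/ioctl.h>
    #include <stdint.h>

    #include <xen/xenio_gntdev.h>           /* header path assumed */

    static int
    lookup_grant_range(int gntfd, void *va, uint64_t *offset, uint32_t *count)
    {
            struct ioctl_gntdev_get_offset_for_vaddr gov = {
                    .vaddr = (uint64_t)(uintptr_t)va, /* must be range start */
            };

            if (ioctl(gntfd, IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR, &gov) == -1)
                    return -1;

            *offset = gov.offset;   /* offset for a later mmap() */
            *count = gov.count;     /* pages mapped at vaddr */
            return 0;
    }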
  /src/sys/external/bsd/drm2/dist/drm/i915/gt/uc/
intel_huc.c
     68 void *vaddr; local in function:intel_huc_rsa_data_create
     90 vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
     91 if (IS_ERR(vaddr)) {
     93 return PTR_ERR(vaddr);
     96 copied = intel_uc_fw_copy_rsa(&huc->fw, vaddr, vma->size);
  /src/sys/external/bsd/drm2/dist/drm/i915/selftests/
igt_spinner.c
     20 void *vaddr; local in function:igt_spinner_init
     39 vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
     40 if (IS_ERR(vaddr)) {
     41 err = PTR_ERR(vaddr);
     44 spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
     47 vaddr = i915_gem_object_pin_map(spin->obj, mode);
     48 if (IS_ERR(vaddr)) {
     49 err = PTR_ERR(vaddr);
     52 spin->batch = vaddr;
  /src/sys/external/bsd/drm2/drm/
drm_cache.c
     81 drm_clflush_virt_range(void *vaddr, unsigned long nbytes)
     86 drm_md_clflush_virt_range(vaddr, nbytes);
    132 void *const vaddr = kmap_atomic(page); local in function:drm_md_clflush_page
    134 drm_md_clflush_virt_range(vaddr, PAGE_SIZE);
    136 kunmap_atomic(vaddr);
    143 const vaddr_t vaddr = (vaddr_t)ptr; local in function:drm_md_clflush_virt_range
    144 const vaddr_t start = rounddown(vaddr, clflush_size);
    145 const vaddr_t end = roundup(vaddr + nbytes, clflush_size);
    192 void *const vaddr = kmap_atomic(page);
    194 cache_flush(vaddr, PAGE_SIZE)
    239 void *const vaddr = kmap_atomic(page); local in function:drm_md_clflush_page
    [all...]
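The hits at lines 143-145 show the rounding drm_md_clflush_virt_range() applies: the flushed span is widened to whole cache lines so no byte of the buffer sits in a line that is only partially flushed. A minimal sketch of that rounding, with a hypothetical per-line flush primitive standing in for the machine-dependent instruction:

    #include <sys/param.h>
    #include <sys/types.h>

    void flush_cache_line(vaddr_t va);      /* hypothetical primitive */

    static void
    flush_range(vaddr_t vaddr, size_t nbytes, size_t clflush_size)
    {
            /* Widen [vaddr, vaddr + nbytes) to cache-line boundaries. */
            const vaddr_t start = rounddown(vaddr, clflush_size);
            const vaddr_t end = roundup(vaddr + nbytes, clflush_size);

            for (vaddr_t va = start; va < end; va += clflush_size)
                    flush_cache_line(va);
    }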
  /src/lib/libkvm/
kvm_sparc64.c
    152 u_long vaddr; local in function:_kvm_kvatop
    154 vaddr = va - cpup->ktextbase;
    155 *pa = cpup->ktextp + vaddr;
    156 return (int)(cpup->ktextsz - vaddr);
    160 u_long vaddr; local in function:_kvm_kvatop
    162 vaddr = va - cpup->kdatabase;
    163 *pa = cpup->kdatap + vaddr;
    164 return (int)(cpup->kdatasz - vaddr);
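Both _kvm_kvatop matches perform the same linear translation: the sparc64 kernel text and data regions are physically contiguous, so a kernel virtual address converts to a physical one by offset arithmetic alone. A sketch of the computation, with the region parameters passed in rather than read from cpup:

    #include <sys/types.h>

    /* Returns contiguous bytes valid from *pa, or 0 if va is outside. */
    static int
    kvatop_linear(u_long va, u_long base, u_long phys, u_long size,
        u_long *pa)
    {
            if (va < base || va >= base + size)
                    return 0;

            u_long vaddr = va - base;       /* offset into the region */
            *pa = phys + vaddr;             /* same offset from phys base */
            return (int)(size - vaddr);     /* bytes left in the region */
    }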
  /src/sys/arch/vax/uba/
uba_mainbus.c
    119 vaddr_t vaddr; local in function:qba_attach
    152 vaddr = vax_map_physmem(paddr, 1);
    153 if (badaddr((void *)vaddr, 2) == 0) {
    163 vax_unmap_physmem(vaddr, 1);
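qba_attach() uses the classic VAX bus-probe pattern: map one page of the candidate device's physical space, poke it with badaddr(), and unmap. badaddr() returning 0 means a 2-byte access completed without faulting, i.e. something responds at that address. A sketch of the probe (the helper name is illustrative):

    static int
    qbus_probe(paddr_t paddr)
    {
            vaddr_t vaddr = vax_map_physmem(paddr, 1);      /* map 1 page */
            int present = (badaddr((void *)vaddr, 2) == 0); /* 0: responds */

            vax_unmap_physmem(vaddr, 1);                    /* drop mapping */
            return present;
    }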
