Lines matching refs:mem (each entry is a source line number followed by the matching line)

63 add_gtt_bo_map(struct aub_mem *mem, struct gen_batch_decode_bo bo, bool ppgtt, bool unmap_after_use)
70 list_add(&m->link, &mem->maps);
74 aub_mem_clear_bo_maps(struct aub_mem *mem)
76 list_for_each_entry_safe(struct bo_map, i, &mem->maps, link) {
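
The two groups above are the BO-map bookkeeping: add_gtt_bo_map() (line 63) records each CPU mapping on mem->maps, and aub_mem_clear_bo_maps() (line 74) walks that list to drop them, munmap()ing the ones flagged unmap_after_use. A minimal self-contained sketch of that lifecycle follows; the real code uses Mesa's intrusive util/list.h list, so the plain singly linked list and the exact field layout here are assumptions.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    /* Simplified stand-in for the decoder's BO descriptor; the exact
     * fields are an assumption based on how the listing uses it. */
    struct decode_bo {
       uint64_t addr;   /* GPU virtual address */
       uint64_t size;   /* size in bytes */
       void *map;       /* CPU mapping, if any */
    };

    struct bo_map {
       struct bo_map *next;    /* plain link; Mesa uses util/list.h */
       struct decode_bo bo;
       bool ppgtt;             /* PPGTT (true) vs. GGTT (false) */
       bool unmap_after_use;   /* we own the mmap, must munmap it */
    };

    /* Record a mapping so later lookups can return it directly. */
    static void
    add_bo_map(struct bo_map **maps, struct decode_bo bo,
               bool ppgtt, bool unmap_after_use)
    {
       struct bo_map *m = calloc(1, sizeof(*m));
       m->bo = bo;
       m->ppgtt = ppgtt;
       m->unmap_after_use = unmap_after_use;
       m->next = *maps;
       *maps = m;
    }

    /* Drop every recorded mapping, releasing the ones we created. */
    static void
    clear_bo_maps(struct bo_map **maps)
    {
       for (struct bo_map *m = *maps, *next; m != NULL; m = next) {
          next = m->next;
          if (m->unmap_after_use)
             munmap(m->bo.map, m->bo.size);
          free(m);
       }
       *maps = NULL;
    }
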
113 ensure_ggtt_entry(struct aub_mem *mem, uint64_t virt_addr)
115 struct rb_node *node = rb_tree_search_sloppy(&mem->ggtt, &virt_addr,
121 rb_tree_insert_at(&mem->ggtt, node, &new_entry->node, cmp > 0);
129 search_ggtt_entry(struct aub_mem *mem, uint64_t virt_addr)
133 struct rb_node *node = rb_tree_search(&mem->ggtt, &virt_addr, cmp_ggtt_entry);
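
ensure_ggtt_entry() and search_ggtt_entry() are the find-or-insert and exact-lookup halves of the GGTT map, keyed by virtual address: rb_tree_search_sloppy() returns the last node visited even on a miss, which is exactly the parent that rb_tree_insert_at() needs to attach a new node without a second descent (the `cmp > 0` argument picks the left or right slot). The same idiom, sketched with a plain unbalanced BST instead of Mesa's util/rb_tree so it stays self-contained; the ggtt_entry fields are assumptions:

    #include <stdint.h>
    #include <stdlib.h>

    /* Hypothetical simplified GGTT entry: one page-table entry keyed
     * by its virtual address. */
    struct ggtt_entry {
       struct ggtt_entry *left, *right;
       uint64_t virt_addr;   /* search key */
       uint64_t phys_addr;   /* PTE payload filled in by the writer */
    };

    /* Find-or-insert: descend to the entry for virt_addr, creating it
     * at the point where the search fell off the tree, the same shape
     * as rb_tree_search_sloppy() + rb_tree_insert_at(). */
    static struct ggtt_entry *
    ensure_entry(struct ggtt_entry **root, uint64_t virt_addr)
    {
       while (*root != NULL) {
          if (virt_addr == (*root)->virt_addr)
             return *root;                 /* already present */
          root = virt_addr < (*root)->virt_addr ? &(*root)->left
                                                : &(*root)->right;
       }
       struct ggtt_entry *e = calloc(1, sizeof(*e));
       e->virt_addr = virt_addr;
       *root = e;                          /* attach at search point */
       return e;
    }

    /* Exact-match lookup, as in search_ggtt_entry(): NULL if absent. */
    static struct ggtt_entry *
    search_entry(struct ggtt_entry *root, uint64_t virt_addr)
    {
       while (root != NULL && root->virt_addr != virt_addr)
          root = virt_addr < root->virt_addr ? root->left : root->right;
       return root;
    }
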
144 struct phys_mem *mem = rb_node_data(struct phys_mem, node, node);
145 return cmp_uint64(mem->phys_addr, *(uint64_t *)addr);
149 ensure_phys_mem(struct aub_mem *mem, uint64_t phys_addr)
151 struct rb_node *node = rb_tree_search_sloppy(&mem->mem, &phys_addr, cmp_phys_mem);
156 new_mem->fd_offset = mem->mem_fd_len;
158 MAYBE_UNUSED int ftruncate_res = ftruncate(mem->mem_fd, mem->mem_fd_len += 4096);
162 mem->mem_fd, new_mem->fd_offset);
165 rb_tree_insert_at(&mem->mem, node, &new_mem->node, cmp > 0);
173 search_phys_mem(struct aub_mem *mem, uint64_t phys_addr)
177 struct rb_node *node = rb_tree_search(&mem->mem, &phys_addr, cmp_phys_mem);
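
ensure_phys_mem() is where the memfd trick is visible: each newly seen physical page grows the file by 4096 bytes with ftruncate() (line 158) and is mmap()ed MAP_SHARED at its own offset (lines 158-162), with the node then inserted into the mem->mem tree by the same sloppy-search/insert-at pattern. Because the memfd is the single backing store, any later MAP_SHARED mapping of the same offset aliases the same bytes, which is what the BO-stitching code further down relies on. A sketch of the allocation path, with hypothetical names:

    #define _GNU_SOURCE           /* memfd-backed allocation sketch */
    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <sys/types.h>
    #include <unistd.h>

    #define PAGE_SIZE 4096

    /* Hypothetical record for one 4 KiB "physical" page backed by a
     * slice of the memfd. */
    struct phys_page {
       uint64_t phys_addr;   /* page-aligned physical address (key) */
       off_t fd_offset;      /* this page's slice within the memfd */
       void *data;           /* CPU view of that slice */
    };

    /* Allocate backing for one physical page: extend the memfd by a
     * page, then map the new slice MAP_SHARED so every mapping of this
     * offset aliases the same memory. */
    static struct phys_page *
    alloc_phys_page(int mem_fd, off_t *mem_fd_len, uint64_t phys_addr)
    {
       struct phys_page *p = calloc(1, sizeof(*p));
       p->phys_addr = phys_addr;
       p->fd_offset = *mem_fd_len;

       *mem_fd_len += PAGE_SIZE;
       if (ftruncate(mem_fd, *mem_fd_len) == -1)
          goto fail;

       p->data = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
                      MAP_SHARED, mem_fd, p->fd_offset);
       if (p->data == MAP_FAILED)
          goto fail;
       return p;

    fail:
       free(p);
       return NULL;
    }
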
189 struct aub_mem *mem = _mem;
195 add_gtt_bo_map(mem, bo, false, false);
202 struct aub_mem *mem = _mem;
209 struct ggtt_entry *pt = ensure_ggtt_entry(mem, virt_addr);
218 struct aub_mem *mem = _mem;
221 struct phys_mem *pmem = ensure_phys_mem(mem, page);
235 struct aub_mem *mem = _mem;
238 struct ggtt_entry *entry = search_ggtt_entry(mem, page);
246 aub_mem_phys_write(mem, phys_page + offset, data, size_this_page);
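
aub_mem_phys_write() (line 218) and aub_mem_ggtt_write() (line 235) both split writes at 4 KiB boundaries, because consecutive pages can live at unrelated memfd offsets: ggtt_write resolves each page through search_ggtt_entry() and delegates the per-page copy to aub_mem_phys_write() with size_this_page (line 246), while aub_mem_local_write() and aub_mem_ggtt_entry_write() above feed the map list and the GGTT tree respectively. A sketch of the chunking loop, with a toy one-page resolver standing in for the real lookups:

    #include <stdint.h>
    #include <string.h>

    #define PAGE_SIZE 4096u

    /* Toy resolver so the sketch stands alone: every page maps to one
     * static buffer.  The real code resolves the page through
     * search_ggtt_entry() / ensure_phys_mem(). */
    static uint8_t fake_page[PAGE_SIZE];
    static void *
    resolve_page(uint64_t page_addr)
    {
       (void)page_addr;
       return fake_page;
    }

    /* Copy `size` bytes to address `addr`, one page at a time: each
     * iteration writes at most up to the next 4 KiB boundary. */
    static void
    paged_write(uint64_t addr, const void *data, uint64_t size)
    {
       uint64_t done = 0;
       while (done < size) {
          uint64_t cur = addr + done;
          uint64_t page = cur & ~(uint64_t)(PAGE_SIZE - 1);
          uint64_t offset = cur - page;
          uint64_t chunk = PAGE_SIZE - offset;   /* room in this page */
          if (chunk > size - done)
             chunk = size - done;

          memcpy((uint8_t *)resolve_page(page) + offset,
                 (const uint8_t *)data + done, chunk);
          done += chunk;
       }
    }
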
254 struct aub_mem *mem = _mem;
257 list_for_each_entry(struct bo_map, i, &mem->maps, link)
264 (struct ggtt_entry *)rb_tree_search_sloppy(&mem->ggtt, &address,
286 struct phys_mem *phys_mem = search_phys_mem(mem, phys_addr);
294 MAP_SHARED | MAP_FIXED, mem->mem_fd, phys_mem->fd_offset);
298 add_gtt_bo_map(mem, bo, false, true);
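
aub_mem_get_ggtt_bo() turns scattered memfd slices back into one contiguous CPU pointer: after checking the cached mem->maps list (line 257) it walks the GGTT tree range (line 264) and overlays each page with mmap(MAP_SHARED | MAP_FIXED) at its memfd offset (line 294), then caches the result via add_gtt_bo_map(..., unmap_after_use = true) (line 298). The listing does not show the initial reservation of the address range, so the anonymous placeholder mapping below is an assumption. A sketch of the stitching step, given already-resolved page offsets:

    #define _GNU_SOURCE
    #include <stddef.h>
    #include <sys/mman.h>
    #include <sys/types.h>

    #define PAGE_SIZE 4096u

    /* Build one contiguous CPU view from scattered memfd pages: first
     * reserve address space for the whole range, then replace it page
     * by page with MAP_FIXED mappings of the right memfd slices. */
    static void *
    stitch_pages(int mem_fd, const off_t *fd_offsets, size_t n_pages)
    {
       size_t size = n_pages * PAGE_SIZE;

       /* Placeholder covering the whole BO; MAP_FIXED replaces it. */
       void *base = mmap(NULL, size, PROT_READ | PROT_WRITE,
                         MAP_SHARED | MAP_ANONYMOUS, -1, 0);
       if (base == MAP_FAILED)
          return NULL;

       for (size_t i = 0; i < n_pages; i++) {
          void *page = mmap((char *)base + i * PAGE_SIZE, PAGE_SIZE,
                            PROT_READ | PROT_WRITE,
                            MAP_SHARED | MAP_FIXED,
                            mem_fd, fd_offsets[i]);
          if (page == MAP_FAILED) {
             munmap(base, size);
             return NULL;
          }
       }
       return base;   /* reads/writes here alias the memfd slices */
    }
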
304 ppgtt_walk(struct aub_mem *mem, uint64_t pml4, uint64_t address)
309 struct phys_mem *table = search_phys_mem(mem, addr);
319 return search_phys_mem(mem, addr);
323 ppgtt_mapped(struct aub_mem *mem, uint64_t pml4, uint64_t address)
325 return ppgtt_walk(mem, pml4, address) != NULL;
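
ppgtt_walk() resolves a virtual address through the four PPGTT levels (PML4, PDP, PD, PT): each level looks up the table's CPU copy with search_phys_mem() (line 309), indexes it, and follows the entry, and the loop's final address is looked up once more as the data page (line 319); ppgtt_mapped() just tests that the walk succeeds. A sketch of the walk, taking the resolver as a callback; the 48-bit, 9-bits-per-level layout matches standard x86-style GPU page tables, and the present-bit test is an assumption about the PTE format:

    #include <stddef.h>
    #include <stdint.h>

    struct page { uint64_t qw[512]; };   /* one 4 KiB table */

    /* Resolver mapping a physical address to our CPU copy of that
     * page; stands in for search_phys_mem().  NULL means unknown. */
    typedef struct page *(*lookup_fn)(uint64_t phys_addr);

    /* Four-level walk: shift starts at 39 (bits 47:39 index the PML4)
     * and drops by 9 per level until bits 20:12 index the PT. */
    static struct page *
    walk(lookup_fn lookup, uint64_t pml4_phys, uint64_t address)
    {
       uint64_t table_phys = pml4_phys;
       for (int shift = 39; shift >= 12; shift -= 9) {
          struct page *table = lookup(table_phys & ~0xfffull);
          if (table == NULL)
             return NULL;                  /* table page never seen */
          uint64_t entry = table->qw[(address >> shift) & 0x1ff];
          if (!(entry & 1))                /* assumed present bit */
             return NULL;
          table_phys = entry & ~0xfffull;  /* next table / data page */
       }
       return lookup(table_phys);          /* the final data page */
    }
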
331 struct aub_mem *mem = _mem;
334 list_for_each_entry(struct bo_map, i, &mem->maps, link)
340 if (!ppgtt_mapped(mem, mem->pml4, address))
347 while (ppgtt_mapped(mem, mem->pml4, end))
356 struct phys_mem *phys_mem = ppgtt_walk(mem, mem->pml4, page);
360 MAP_SHARED | MAP_FIXED, mem->mem_fd, phys_mem->fd_offset);
364 add_gtt_bo_map(mem, bo, true, true);
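
aub_mem_get_ppgtt_bo() has no recorded BO size, so it discovers the extent empirically: after a miss in the cache list (line 334) and a check that the start address is mapped at all (line 340), the while loop on line 347 extends the end one page at a time until the PPGTT walk fails, then each page is stitched in with MAP_SHARED | MAP_FIXED (line 360) and cached (line 364) just like the GGTT case. The size-discovery loop, restated with a toy predicate in place of ppgtt_mapped():

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096u

    /* Toy predicate so the sketch runs alone: pretend the first eight
     * pages (32 KiB) are mapped.  The real check walks the PPGTT. */
    static bool
    page_mapped(uint64_t virt)
    {
       return virt < 8 * PAGE_SIZE;
    }

    /* Grow the range one page at a time while the next page still
     * resolves; returns the size of the contiguous mapping holding
     * `address`. */
    static uint64_t
    find_extent(uint64_t address)
    {
       uint64_t start = address & ~(uint64_t)(PAGE_SIZE - 1);
       uint64_t end = start + PAGE_SIZE;
       while (page_mapped(end))
          end += PAGE_SIZE;
       return end - start;
    }
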
370 aub_mem_init(struct aub_mem *mem)
372 memset(mem, 0, sizeof(*mem));
374 list_inithead(&mem->maps);
376 mem->mem_fd = memfd_create("phys memory", 0);
378 return mem->mem_fd != -1;
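
aub_mem_init() zeroes the context, initializes the map list head, and creates the anonymous memfd that backs all physical pages, reporting failure via the fd (line 378); the empty rb-trees and mem_fd_len = 0 fall out of the memset. The same pattern with a pared-down context struct; memfd_create() needs _GNU_SOURCE and glibc 2.27 or later:

    #define _GNU_SOURCE           /* memfd_create() */
    #include <stdbool.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/types.h>

    /* Pared-down context: just the file backing "physical" memory. */
    struct mem_ctx {
       int mem_fd;        /* anonymous file backing all phys pages */
       off_t mem_fd_len;  /* current size; grows one page per alloc */
    };

    /* Zero everything, then create the backing memfd.  The name is
     * only a debugging label (it shows up under /proc/<pid>/fd). */
    static bool
    mem_ctx_init(struct mem_ctx *ctx)
    {
       memset(ctx, 0, sizeof(*ctx));
       ctx->mem_fd = memfd_create("phys memory", 0);
       return ctx->mem_fd != -1;
    }
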
382 aub_mem_fini(struct aub_mem *mem)
384 if (mem->mem_fd == -1)
387 aub_mem_clear_bo_maps(mem);
390 rb_tree_foreach_safe(struct ggtt_entry, entry, &mem->ggtt, node) {
391 rb_tree_remove(&mem->ggtt, &entry->node);
394 rb_tree_foreach_safe(struct phys_mem, entry, &mem->mem, node) {
395 rb_tree_remove(&mem->mem, &entry->node);
399 close(mem->mem_fd);
400 mem->mem_fd = -1;
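
aub_mem_fini() tears down in dependency order: the mem_fd == -1 check (line 384) makes a second call a no-op, the BO maps go first since they alias memfd pages, then both trees are emptied under rb_tree_foreach_safe(), which fetches the next node before rb_tree_remove() detaches the current one, and finally the memfd is closed and the fd poisoned (line 400). The tree teardown, restated for the plain BST used in the sketches above:

    #include <stdlib.h>

    /* Minimal node matching the BST sketches above. */
    struct node {
       struct node *left, *right;
    };

    /* Postorder teardown: visit children before freeing the parent,
     * the same "grab next before removing current" discipline that
     * rb_tree_foreach_safe() gives the real fini path. */
    static void
    free_tree(struct node *n)
    {
       if (n == NULL)
          return;
       free_tree(n->left);
       free_tree(n->right);
       free(n);
    }
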
404 aub_mem_get_phys_addr_data(struct aub_mem *mem, uint64_t phys_addr)
406 struct phys_mem *page = search_phys_mem(mem, phys_addr);
413 aub_mem_get_ppgtt_addr_data(struct aub_mem *mem, uint64_t virt_addr)
415 struct phys_mem *page = ppgtt_walk(mem, mem->pml4, virt_addr);
422 aub_mem_get_ppgtt_addr_aub_data(struct aub_mem *mem, uint64_t virt_addr)
424 struct phys_mem *page = ppgtt_walk(mem, mem->pml4, virt_addr);
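
The three getters at the end share one shape: resolve the page containing the address, directly via search_phys_mem() for physical addresses or through ppgtt_walk(mem, mem->pml4, ...) for virtual ones, then hand back that page's contents. Judging by the name alone, the _aub_data variant returns the page as originally recorded in the AUB stream rather than the live copy; the real functions also package the result in a decoder BO struct, so the raw-pointer return below is a simplification:

    #include <stddef.h>
    #include <stdint.h>

    struct phys_page {
       void *data;   /* live CPU copy of one 4 KiB page */
    };

    /* Toy resolver so the sketch stands alone: one known page at
     * 0x1000.  Real lookups go through search_phys_mem() or the
     * PPGTT walk. */
    static uint8_t page_bytes[4096];
    static struct phys_page the_page = { page_bytes };

    static struct phys_page *
    lookup(uint64_t page_addr)
    {
       return page_addr == 0x1000 ? &the_page : NULL;
    }

    /* Shared getter shape: resolve the page holding `addr`, then
     * return a pointer at the in-page offset; NULL when nothing maps
     * the address. */
    static void *
    get_addr_data(uint64_t addr)
    {
       struct phys_page *page = lookup(addr & ~0xfffull);
       if (page == NULL)
          return NULL;
       return (uint8_t *)page->data + (addr & 0xfff);
    }
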