| /src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/subdev/mmu/ | 
| priv.h | 60 		struct nvkm_mmu_ptp *ptp;  member in union:nvkm_mmu_pt::__anon502b8078040a 
 | 
| nouveau_nvkm_subdev_mmu_base.c | 51 	const int slot = pt->base >> pt->ptp->shift;
 52 	struct nvkm_mmu_ptp *ptp = pt->ptp;  local in function:nvkm_mmu_ptp_put
 55 	 * there will be now, so return PTP to the cache.
 57 	if (!ptp->free)
 58 		list_add(&ptp->head, &mmu->ptp.list);
 59 	ptp->free |= BIT(slot);
 61 	/* If there's no more sub-allocations, destroy PTP. */
 62 	if (ptp->free == ptp->mask)
 75  struct nvkm_mmu_ptp *ptp;  local in function:nvkm_mmu_ptp_get
 [all...]
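
The nvkm_mmu_ptp_put() matches above hinge on a per-page free bitmask: returning a slot may put the PTP back on the cache list, and a fully free mask means the PTP can be destroyed. A minimal standalone sketch of that bookkeeping, with hypothetical names (sub_page, sub_put) and no nvkm dependencies:

/*
 * Sketch of the sub-allocation bookkeeping seen above, assuming a "ptp"
 * carves one backing page into fixed-size slots tracked by a free bitmask.
 * All names here are illustrative stand-ins, not the nvkm API.
 */
#include <stdint.h>
#include <stdbool.h>

struct sub_page {
	uint32_t free;		/* bitmask of currently free slots */
	uint32_t mask;		/* bitmask covering every slot */
	unsigned shift;		/* log2 of the slot size in bytes */
	bool     on_list;	/* stands in for list_add() to the cache list */
};

/* Return the slot at byte offset 'base' to its page. */
static bool
sub_put(struct sub_page *p, unsigned base)
{
	const unsigned slot = base >> p->shift;

	/* Page had no free slots, so it was off the cache list: put it back. */
	if (p->free == 0)
		p->on_list = true;
	p->free |= 1u << slot;

	/* Every slot is free again: the caller may destroy the whole page. */
	return p->free == p->mask;
}
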
 | 
| /src/sys/external/bsd/drm2/dist/drm/nouveau/include/nvkm/subdev/ | 
| mmu.h | 131 	} ptc, ptp;  member in struct:nvkm_mmu 
 | 
| /src/sys/arch/xen/x86/ | 
| x86_xpmap.c | 172 	pt_entry_t *ptp;  local in function:xen_set_ldt
 183 		ptp = kvtopte(va);
 184 		pmap_pte_clearbits(ptp, PTE_W);
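
The xen_set_ldt() matches show the usual write-protect idiom: look up the PTE for each page of the range and clear its writable bit. A compile-only sketch of that loop, using hypothetical stand-ins (kvtopte_model, PTE_W_MODEL) rather than the real x86/Xen definitions:

#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE_MODEL	4096u
#define PTE_W_MODEL	0x002u			/* hypothetical "writable" bit */

static uint64_t pte_space[512];			/* fake PTE array for the sketch */

static uint64_t *
kvtopte_model(uintptr_t va)
{
	return &pte_space[(va / PAGE_SIZE_MODEL) % 512];
}

/* Clear the writable bit on every PTE covering the range. */
static void
write_protect_range(uintptr_t va, size_t npages)
{
	for (size_t i = 0; i < npages; i++, va += PAGE_SIZE_MODEL) {
		uint64_t *ptp = kvtopte_model(va);
		*ptp &= ~(uint64_t)PTE_W_MODEL;	/* models pmap_pte_clearbits(ptp, PTE_W) */
	}
}
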
 
 | 
| /src/sys/arch/mips/ralink/ | 
| ralink_gpio.c | 265 #define RA_GPIO_PIN_INIT(sc, var, pin, ptp, regname)			\
 267 		const u_int _reg_bit = 1 << (pin - ptp->pin_reg_base);	\
 268 		const u_int _mask_bit = 1 << (pin - ptp->pin_mask_base);\
 269 		var = gp_read(sc, ptp->regname.reg);			\
 270 		if ((ptp->regname.mask & _mask_bit) != 0) {		\
 275 		gp_write(sc, ptp->regname.reg, var);			\
 278 #define RA_GPIO_PIN_INIT_DIR(sc, var, pin, ptp)				\
 280 		const u_int _reg_bit = 1 << (pin - ptp->pin_reg_base);	\
 281 		const u_int _mask_bit = 1 << (pin - ptp->pin_mask_base);\
 282 		var = gp_read(sc, ptp->pin_dir.reg);
 807  const pin_tab_t * const ptp = &pin_tab[index];  local in function:ra_gpio_pin_init
 908  const pin_tab_t * const ptp = &pin_tab[index];  local in function:ra_gpio_pin_read
 1008  const pin_tab_t * const ptp = &pin_tab[index];  local in function:ra_gpio_pin_write
 1439  const pin_tab_t * const ptp = &pin_tab[index];  local in function:disable_gpio_interrupt
 1463  const pin_tab_t * const ptp = &pin_tab[index];  local in function:enable_gpio_interrupt
 [all...]
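
The RA_GPIO_PIN_INIT macros and the pin_tab lookups above follow one pattern: a per-bank table entry translates a global pin number into a register bit and a capability-mask bit before the register is read back, modified, and written. A standalone sketch of that pattern, with hypothetical types (pin_tab_entry, pin_set_output) standing in for the driver's own:

#include <stdint.h>
#include <stdbool.h>

struct pin_reg {
	uint32_t reg;		/* fake register "address": an index into regs[] below */
	uint32_t mask;		/* which pins of this bank support the feature */
};

struct pin_tab_entry {
	unsigned pin_reg_base;	/* first pin number served by this bank's register */
	unsigned pin_mask_base;	/* base pin number used for the capability mask */
	struct pin_reg pin_dir;
};

static uint32_t regs[8];	/* fake register file for the sketch */

/* Configure a pin as output, but only if the bank's mask covers it. */
static bool
pin_set_output(const struct pin_tab_entry *ptp, unsigned pin)
{
	const uint32_t reg_bit  = 1u << (pin - ptp->pin_reg_base);
	const uint32_t mask_bit = 1u << (pin - ptp->pin_mask_base);

	if ((ptp->pin_dir.mask & mask_bit) == 0)
		return false;			/* pin not handled by this bank */

	uint32_t v = regs[ptp->pin_dir.reg];	/* gp_read() analogue */
	v |= reg_bit;
	regs[ptp->pin_dir.reg] = v;		/* gp_write() analogue */
	return true;
}
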
 | 
| /src/sys/arch/sh3/sh3/ | 
| pmap.c | 184 			pt_entry_t *ptp = (pt_entry_t *)  local in function:pmap_growkernel
 186 			if (ptp == NULL)
 188 			__pmap_kernel.pm_ptp[i] = ptp;
 189 			memset(ptp, 0, PAGE_SIZE);
 967 	pt_entry_t *ptp, *pte;  local in function:__pmap_pte_alloc
 977 	ptp = (pt_entry_t *)SH3_PHYS_TO_P1SEG(VM_PAGE_TO_PHYS(pg));
 978 	pmap->pm_ptp[__PMAP_PTP_INDEX(va)] = ptp;
 980 	return ptp + __PMAP_PTP_OFSET(va);
 990 	pt_entry_t *ptp;  local in function:__pmap_pte_lookup
 996 	ptp = pmap->pm_ptp[__PMAP_PTP_INDEX(va)]
 1010  pt_entry_t *ptp;  local in function:__pmap_kpte_lookup
 [all...]
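
The sh3 matches show a simple two-level scheme: pm_ptp[] is a per-pmap directory of PTE pages indexed by the top VA bits, and the remaining bits select a PTE inside that page. A standalone sketch of the lookup, with assumed shifts (4 KB pages, 4 MB per PTE page) and a hypothetical pte_alloc() in place of the real __pmap_pte_alloc():

#include <stdint.h>
#include <stdlib.h>

#define PTP_SHIFT	22u			/* assumed: one PTE page covers 4 MB */
#define PTE_SHIFT	12u			/* assumed: 4 KB pages */
#define NPTP		(1u << (32 - PTP_SHIFT))
#define PTES_PER_PTP	(1u << (PTP_SHIFT - PTE_SHIFT))

typedef uint32_t pt_entry_t;

struct pmap_model {
	pt_entry_t *pm_ptp[NPTP];		/* directory of PTE pages */
};

#define PTP_INDEX(va)	((va) >> PTP_SHIFT)
#define PTP_OFFSET(va)	(((va) >> PTE_SHIFT) & (PTES_PER_PTP - 1))

/* Return the PTE slot for va, allocating the covering PTE page on demand. */
static pt_entry_t *
pte_alloc(struct pmap_model *pm, uint32_t va)
{
	pt_entry_t *ptp = pm->pm_ptp[PTP_INDEX(va)];

	if (ptp == NULL) {
		ptp = calloc(PTES_PER_PTP, sizeof(*ptp));	/* stands in for grabbing a zeroed page */
		if (ptp == NULL)
			return NULL;
		pm->pm_ptp[PTP_INDEX(va)] = ptp;
	}
	return ptp + PTP_OFFSET(va);
}
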
 | 
| /src/sys/arch/hppa/hppa/ | 
| pmap.c | 334 pmap_pde_set(pmap_t pm, vaddr_t va, paddr_t ptp)
 337 	UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx ptp %#jx", (uintptr_t)pm,
 338 	    va, ptp, 0);
 340 	KASSERT((ptp & PGOFSET) == 0);
 342 	pm->pm_pdir[va >> 22] = ptp;
 371 	pm->pm_stats.resident_count++;	/* count PTP as resident */
 397 pmap_pde_release(pmap_t pmap, vaddr_t va, struct vm_page *ptp)
 400 	UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx ptp %#jx", (uintptr_t)pmap,
 401 	    va, (uintptr_t)ptp, 0);
 404 	if (--ptp->wire_count <= 1)
 1493  struct vm_page *pg, *ptp;  local in function:pmap_remove
 [all...]
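
The hppa matches pair two ideas: pmap_pde_set() stores a page-aligned PTP physical address in the PDE slot for the VA (and counts the PTP as resident), while pmap_pde_release() drops a wire-count reference to decide when the PTP can go away. A simplified sketch with stand-in types (pmap_model, ptp_page) rather than the real hppa ones:

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

#define PDE_SHIFT	22u
#define PGOFSET_MODEL	0xfffu
#define NPDE		(1u << (32 - PDE_SHIFT))

struct ptp_page {
	unsigned wire_count;	/* 1 == allocated but empty, >1 == holds live PTEs */
};

struct pmap_model {
	uint32_t pm_pdir[NPDE];	/* PDE slots holding PTP physical addresses */
	unsigned resident_count;
};

static void
pde_set(struct pmap_model *pm, uint32_t va, uint32_t ptp_pa)
{
	assert((ptp_pa & PGOFSET_MODEL) == 0);	/* PTP must be page aligned */
	pm->pm_pdir[va >> PDE_SHIFT] = ptp_pa;
	pm->resident_count++;			/* count the PTP as resident */
}

/* Drop one reference; report whether the PTP is now unused and can be freed. */
static bool
pde_release(struct ptp_page *ptp)
{
	return --ptp->wire_count <= 1;
}
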
 | 
| /src/sys/arch/vax/vax/ | 
| pmap.c | 992 	int *ptp, opte;  local in function:pmap_kenter_pa
 994 	ptp = (int *)kvtopte(va);
 995 	PMDEBUG(("pmap_kenter_pa: va: %lx, pa %lx, prot %x ptp %p\n",
 996 	    va, pa, prot, ptp));
 997 	opte = ptp[0];
 998 	ptp[0] = PG_V | ((prot & VM_PROT_WRITE)? PG_KW : PG_KR) |
 1000 	ptp[1] = ptp[0] + 1;
 1001 	ptp[2] = ptp[0] + 2
 [all...]
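
The vax pmap_kenter_pa() matches write several consecutive PTEs per call, each one frame further on, which suggests the hardware page is smaller than the MI page. A sketch of that replication under that assumption, with illustrative constants (HW_PTES_PER_PAGE, PG_*_MODEL) rather than the real VAX bits:

#include <stdint.h>
#include <stdbool.h>

#define HW_PTES_PER_PAGE 8u		/* assumed ratio of MI page to hardware page */
#define PG_V_MODEL	0x80000000u	/* hypothetical "valid" bit */
#define PG_KW_MODEL	0x20000000u	/* hypothetical kernel read/write */
#define PG_KR_MODEL	0x10000000u	/* hypothetical kernel read-only */

static void
kenter_model(uint32_t *ptp, uint32_t pfn, bool writable)
{
	uint32_t pte = PG_V_MODEL | (writable ? PG_KW_MODEL : PG_KR_MODEL) | pfn;

	/* Each successive hardware PTE maps the next small frame. */
	for (unsigned i = 0; i < HW_PTES_PER_PAGE; i++)
		ptp[i] = pte + i;
}
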
 | 
| /src/sys/arch/powerpc/ibm4xx/ | 
| pmap.c | 985 	volatile u_int *ptp;  local in function:pmap_remove
 991 		if ((ptp = pte_find(pm, va)) && (pa = *ptp)) {
 994 			*ptp = 0;
 1033 	volatile u_int *ptp;  local in function:pmap_protect
 1051 		if ((ptp = pte_find(pm, sva)) != NULL) {
 1052 			*ptp &= ~bic;
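
The ibm4xx matches show the lookup-then-modify idiom: pte_find() returns the PTE slot for a VA, pmap_remove() zeroes it, and pmap_protect() clears permission bits in place. A standalone sketch with a hypothetical flat pte_find_model():

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

#define NPTE_MODEL	1024u
#define PAGE_SHIFT_MODEL 12u

static volatile uint32_t pte_table[NPTE_MODEL];	/* fake flat PTE table */

static volatile uint32_t *
pte_find_model(uint32_t va)
{
	return &pte_table[(va >> PAGE_SHIFT_MODEL) % NPTE_MODEL];
}

/* Tear down a mapping; reports whether anything was mapped at va. */
static bool
remove_model(uint32_t va)
{
	volatile uint32_t *ptp = pte_find_model(va);

	if (ptp == NULL || *ptp == 0)
		return false;
	*ptp = 0;
	return true;
}

/* Strip the requested permission bits from an existing mapping. */
static void
protect_model(uint32_t va, uint32_t bic)
{
	volatile uint32_t *ptp = pte_find_model(va);

	if (ptp != NULL)
		*ptp &= ~bic;
}
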
 
 | 
| /src/sys/arch/sparc64/sparc64/ | 
| pmap.c | 403 pseg_set_locksafe(struct pmap *pm, vaddr_t va, int64_t data, paddr_t ptp)
 410 	rv = pseg_set_real(pm, va, data, ptp);
 417 #define pseg_set(pm, va, data, ptp)	pseg_set_locksafe(pm, va, data, ptp)
 422 #define pseg_set(pm, va, data, ptp)	pseg_set_real(pm, va, data, ptp)
 1633 	paddr_t ptp;  local in function:pmap_kenter_pa
 1659 	ptp = 0;
 1662 	i = pseg_set(pm, va, tte.data, ptp);
 1665 		ptp = 0
 1772  paddr_t opa = 0, ptp; /* XXX: gcc */  local in function:pmap_enter
 [all...]
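
The sparc64 matches show pseg_set() being aliased either to a lock-taking wrapper (pseg_set_locksafe) or directly to pseg_set_real(), depending on configuration. A sketch of that wrapper-plus-macro pattern using a pthread mutex as a stand-in for the pmap lock (WANT_LOCKSAFE is a hypothetical switch, and pm_lock must be initialized by the caller):

#include <stdint.h>
#include <pthread.h>

struct pmap_model {
	pthread_mutex_t pm_lock;
};

static int
pseg_set_real_model(struct pmap_model *pm, uint64_t va, int64_t data, uint64_t ptp)
{
	(void)pm; (void)va; (void)data; (void)ptp;
	return 0;		/* would install data/ptp into the page segment tables */
}

/* Same operation, but serialized against other users of this pmap. */
static int
pseg_set_locksafe_model(struct pmap_model *pm, uint64_t va, int64_t data, uint64_t ptp)
{
	int rv;

	pthread_mutex_lock(&pm->pm_lock);
	rv = pseg_set_real_model(pm, va, data, ptp);
	pthread_mutex_unlock(&pm->pm_lock);
	return rv;
}

#ifdef WANT_LOCKSAFE		/* hypothetical configuration switch */
#define pseg_set_model(pm, va, data, ptp) pseg_set_locksafe_model(pm, va, data, ptp)
#else
#define pseg_set_model(pm, va, data, ptp) pseg_set_real_model(pm, va, data, ptp)
#endif
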
 | 
| /src/sys/arch/x86/x86/ | 
| pmap.c | 295  * PTP macros:
 296  *   a PTP's index is the PD index of the PDE that points to it
 297  *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 298  *   a PTP's VA is the first VA mapped by that PTP
 572  * ptp_to_pmap: lookup pmap by ptp
 575 ptp_to_pmap(struct vm_page *ptp)
 579 	if (ptp == NULL) {
 582 	pmap = (struct pmap *)ptp->uobject
 2550  struct vm_page *ptp;  local in function:pmap_get_ptp
 2610  struct vm_page *ptp;  local in function:pmap_install_ptp
 4291  struct vm_page *ptp;  local in function:pmap_remove_locked
 4393  struct vm_page *ptp;  local in function:pmap_sync_pv
 4499  struct vm_page *ptp;  local in function:pmap_pp_remove
 4992  struct vm_page *ptp;  local in function:pmap_enter_ma
 5365  struct vm_page *ptp;  local in function:pmap_enter_gnt
 5536  struct vm_page *ptp;  local in function:pmap_remove_gnt
 5609  struct vm_page *ptp;  local in function:pmap_get_physpage
 5919  struct vm_page *ptp;  local in function:pmap_update
 6240  struct vm_page *ptp;  local in function:pmap_ept_install_ptp
 6291  struct vm_page *ptp;  local in function:pmap_ept_enter
 6680  struct vm_page *ptp;  local in function:pmap_ept_remove
 [all...]
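
The comment block at lines 295-298 defines three relations for a PTP: its index (the PD index of the PDE pointing at it), its byte offset within the linear PTE space, and the first VA it maps. A worked sketch of those three, assuming a two-level 32-bit layout with 4 KB pages rather than the real multi-level x86 code:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_M	12u
#define PAGE_SIZE_M	(1u << PAGE_SHIFT_M)
#define PTES_PER_PTP_M	(PAGE_SIZE_M / sizeof(uint32_t))	/* 1024 PTEs per PTP */

/* PD index of the PDE that points to the PTP covering va. */
static uint32_t
ptp_index(uint32_t va)
{
	return va / (PTES_PER_PTP_M * PAGE_SIZE_M);
}

/* Byte offset of that PTP within the linear PTE space. */
static uint32_t
ptp_offset(uint32_t va)
{
	return ptp_index(va) * PAGE_SIZE_M;
}

/* First VA mapped by that PTP. */
static uint32_t
ptp_va(uint32_t va)
{
	return ptp_index(va) * PTES_PER_PTP_M * PAGE_SIZE_M;
}

int
main(void)
{
	uint32_t va = 0x00c01234;

	printf("index %u, offset %#x, first va %#x\n",
	    (unsigned)ptp_index(va), (unsigned)ptp_offset(va), (unsigned)ptp_va(va));
	return 0;
}
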
 |