/*	$NetBSD: vm_machdep.c,v 1.8 2001/10/18 09:26:08 rearnsha Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * vm_machdep.c
 *
 * vm machine specific bits
 *
 * Created      : 08/10/94
 */

#include "opt_armfpe.h"
#include "opt_pmap_debug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/user.h>
#include <sys/exec.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/pmap.h>
#include <machine/reg.h>
#include <machine/vmparam.h>

#ifdef ARMFPE
#include <arm32/fpe-arm/armfpe.h>
#endif

extern pv_addr_t systempage;

int process_read_regs	__P((struct proc *p, struct reg *regs));
int process_read_fpregs	__P((struct proc *p, struct fpreg *regs));

void	switch_exit	__P((struct proc *p, struct proc *proc0));
extern void proc_trampoline	__P((void));

/*
 * Special compilation symbols:
 *
 * STACKCHECKS - Fill undefined and supervisor stacks with a known pattern
 *		 on forking and check the pattern on exit, reporting
 *		 the amount of stack used.
 */

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * proc_trampoline() and call child_return() with p2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * p1 is the process being forked; if p1 == &proc0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_fork(p1, p2, stack, stacksize, func, arg)
	struct proc *p1;
	struct proc *p2;
	void *stack;
	size_t stacksize;
	void (*func) __P((void *));
	void *arg;
{
	struct pcb *pcb = (struct pcb *)&p2->p_addr->u_pcb;
	struct trapframe *tf;
	struct switchframe *sf;

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("cpu_fork: %p %p %p %p\n", p1, p2, curproc, &proc0);
#endif	/* PMAP_DEBUG */

#if 0 /* XXX */
	if (p1 == curproc) {
		/* Sync the PCB before we copy it. */
		savectx(curpcb);
	}
#endif

	/* Copy the pcb */
	*pcb = p1->p_addr->u_pcb;

	/*
	 * Set up the undefined stack for the process.
	 * Note: this stack is not in use if we are forking from p1
	 */
	pcb->pcb_un.un_32.pcb32_und_sp = (u_int)p2->p_addr +
	    USPACE_UNDEF_STACK_TOP;
	pcb->pcb_un.un_32.pcb32_sp = (u_int)p2->p_addr + USPACE_SVC_STACK_TOP;

#ifdef STACKCHECKS
	/* Fill the undefined stack with a known pattern */
	memset(((u_char *)p2->p_addr) + USPACE_UNDEF_STACK_BOTTOM, 0xdd,
	    (USPACE_UNDEF_STACK_TOP - USPACE_UNDEF_STACK_BOTTOM));
	/* Fill the kernel stack with a known pattern */
	memset(((u_char *)p2->p_addr) + USPACE_SVC_STACK_BOTTOM, 0xdd,
	    (USPACE_SVC_STACK_TOP - USPACE_SVC_STACK_BOTTOM));
#endif	/* STACKCHECKS */

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0) {
		printf("p1->procaddr=%p p1->procaddr->u_pcb=%p pid=%d pmap=%p\n",
		    p1->p_addr, &p1->p_addr->u_pcb, p1->p_pid,
		    p1->p_vmspace->vm_map.pmap);
		printf("p2->procaddr=%p p2->procaddr->u_pcb=%p pid=%d pmap=%p\n",
		    p2->p_addr, &p2->p_addr->u_pcb, p2->p_pid,
		    p2->p_vmspace->vm_map.pmap);
	}
#endif	/* PMAP_DEBUG */

	pmap_activate(p2);

#ifdef ARMFPE
	/* Initialise a new FP context for p2 and copy the context from p1 */
	arm_fpe_core_initcontext(FP_CONTEXT(p2));
	arm_fpe_copycontext(FP_CONTEXT(p1), FP_CONTEXT(p2));
#endif	/* ARMFPE */

	/* Build the child's trap frame at the top of its SVC stack. */
	p2->p_addr->u_pcb.pcb_tf = tf =
	    (struct trapframe *)pcb->pcb_un.un_32.pcb32_sp - 1;
	*tf = *p1->p_addr->u_pcb.pcb_tf;

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_usr_sp = (u_int)stack + stacksize;

	/* Build a switch frame just below the trap frame. */
	sf = (struct switchframe *)tf - 1;
	sf->sf_spl = _SPL_0;
	sf->sf_r4 = (u_int)func;
	sf->sf_r5 = (u_int)arg;
	sf->sf_pc = (u_int)proc_trampoline;
	pcb->pcb_un.un_32.pcb32_sp = (u_int)sf;
}
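
/*
 * Illustrative sketch (not part of the original file): how the switch
 * frame built in cpu_fork() is consumed.  On the first switch to the
 * child, cpu_switch() reloads the registers saved in the switchframe
 * and resumes at sf_pc, i.e. in proc_trampoline, with the callout in
 * r4 and its argument in r5.  Expressed as C pseudocode under those
 * assumptions:
 */
#if 0
static void
proc_trampoline_sketch(void (*func)(void *), void *arg)	/* from r4, r5 */
{
	func(arg);	/* child_return(p2) for an ordinary fork */
	/*
	 * Then return to user mode through the trap frame copied from
	 * the parent, so fork() appears to return 0 in the child.
	 */
}
#endif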

/*
 * cpu_exit is called as the last action during exit.
 *
 * We clean up a little and then call switch_exit() with the old proc as an
 * argument.  switch_exit() first switches to proc0's context, and finally
 * jumps into switch() to wait for another process to wake up.
 */

void
cpu_exit(p)
	register struct proc *p;
{
#ifdef ARMFPE
	/* Abort any active FP operation and deactivate the context */
	arm_fpe_core_abort(FP_CONTEXT(p), NULL, NULL);
	arm_fpe_core_changecontext(0);
#endif	/* ARMFPE */

#ifdef STACKCHECKS
	/* Report how much stack has been used - debugging */
	if (p) {
		u_char *ptr;
		int loop;

		ptr = ((u_char *)p->p_addr) + USPACE_UNDEF_STACK_BOTTOM;
		for (loop = 0; loop < (USPACE_UNDEF_STACK_TOP - USPACE_UNDEF_STACK_BOTTOM)
		    && *ptr == 0xdd; ++loop, ++ptr) ;
		log(LOG_INFO, "%d bytes of undefined stack fill pattern\n", loop);
		ptr = ((u_char *)p->p_addr) + USPACE_SVC_STACK_BOTTOM;
		for (loop = 0; loop < (USPACE_SVC_STACK_TOP - USPACE_SVC_STACK_BOTTOM)
		    && *ptr == 0xdd; ++loop, ++ptr) ;
		log(LOG_INFO, "%d bytes of svc stack fill pattern\n", loop);
	}
#endif	/* STACKCHECKS */
	uvmexp.swtch++;
	switch_exit(p, &proc0);
}
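
/*
 * Illustrative sketch (not part of the original file): switch_exit()
 * lives in locore assembler.  Per the comment above it conceptually
 * does the following; `curproc', `curpcb' and cpu_switch() are named
 * here on the assumption they match the usual arm32 locore symbols.
 */
#if 0
static void
switch_exit_sketch(struct proc *p, struct proc *p0)
{
	/* Adopt proc0's context so p's U-area and stacks are dead. */
	curproc = p0;
	curpcb = &p0->p_addr->u_pcb;

	/* Jump into the scheduler to wait for another process. */
	cpu_switch(p0);		/* never returns to p */
}
#endif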


void
cpu_swapin(p)
	struct proc *p;
{
#if 0
	/* Don't do this.  See the comment in cpu_swapout().  */
#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("cpu_swapin(%p, %d, %s, %p)\n", p, p->p_pid,
		    p->p_comm, p->p_vmspace->vm_map.pmap);
#endif	/* PMAP_DEBUG */

	/* Map the system page */
	pmap_enter(p->p_vmspace->vm_map.pmap, 0x00000000, systempage.pv_pa,
	    VM_PROT_READ, VM_PROT_READ|PMAP_WIRED);
	pmap_update(p->p_vmspace->vm_map.pmap);
#endif
}


void
cpu_swapout(p)
	struct proc *p;
{
#if 0
	/*
	 * Don't do this!  If the pmap is shared with another process,
	 * it will lose its page0 entry.  That's bad news indeed.
	 */
#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("cpu_swapout(%p, %d, %s, %p)\n", p, p->p_pid,
		    p->p_comm, &p->p_vmspace->vm_map.pmap);
#endif	/* PMAP_DEBUG */

	/* Free the system page mapping */
	pmap_remove(p->p_vmspace->vm_map.pmap, 0x00000000, 0x00000000 + NBPG);
	pmap_update(p->p_vmspace->vm_map.pmap);
#endif
}


/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of NBPG.
 */

void
pagemove(from, to, size)
	caddr_t from, to;
	size_t size;
{
	register pt_entry_t *fpte, *tpte;

	if (size % NBPG)
		panic("pagemove: size=%08lx", (u_long) size);

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("pagemove: V%p to %p size %08lx\n",
		    from, to, (u_long) size);
#endif	/* PMAP_DEBUG */

	fpte = vtopte((vaddr_t)from);
	tpte = vtopte((vaddr_t)to);

	/*
	 * Make sure the cache does not have dirty data for the
	 * pages we are moving. Pages in the buffers are only
	 * ever moved with pagemove, so we only need to clean
	 * the 'from' area.
	 */

	cpu_cache_purgeD_rng((u_int)from, size);

	/* Move the PTEs themselves, one page at a time. */
	while (size > 0) {
		*tpte++ = *fpte;
		*fpte++ = 0;
		size -= NBPG;
	}
	/* cpu_tlb_flushD(); */
}
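
/*
 * Usage sketch (illustrative, not part of the original file): as the
 * cache-cleaning comment above notes, only buffer-cache pages are ever
 * moved this way.  Assuming two page-aligned Sysmap addresses, moving
 * two pages' worth of mappings looks like:
 */
#if 0
static void
pagemove_example(caddr_t old_va, caddr_t new_va)	/* hypothetical VAs */
{
	pagemove(old_va, new_va, 2 * NBPG);
	/* The two PTEs now map the pages at new_va; old_va is unmapped. */
}
#endif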

extern struct vm_map *phys_map;

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
void
vmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("vmapbuf: bp=%08x buf=%08x len=%08x\n", (u_int)bp,
		    (u_int)bp->b_data, (u_int)len);
#endif	/* PMAP_DEBUG */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/* Save the user address and page-align the transfer window. */
	faddr = trunc_page((vaddr_t)(bp->b_saveaddr = bp->b_data));
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_valloc_wait(phys_map, len);
	bp->b_data = (caddr_t)(taddr + off);

	/*
	 * The region is locked, so we expect that pmap_extract() will
	 * always succeed here.
	 */
	while (len) {
		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &fpa);
		pmap_enter(pmap_kernel(), taddr, fpa,
			VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
	vaddr_t addr, off;

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("vunmapbuf: bp=%08x buf=%08x len=%08x\n",
		    (u_int)bp, (u_int)bp->b_data, (u_int)len);
#endif	/* PMAP_DEBUG */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	/*
	 * Make sure the cache does not have dirty data for the
	 * pages we had mapped.
	 */
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);

	pmap_remove(pmap_kernel(), addr, addr + len);
	pmap_update(pmap_kernel());
	uvm_km_free_wakeup(phys_map, addr, len);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}
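
/*
 * Usage sketch (illustrative, not part of the original file): these
 * two routines bracket a raw transfer on user memory, classically from
 * physio().  The buffer must have B_PHYS set and the user pages must
 * already be wired with uvm_vslock(); `dev_strategy' is a hypothetical
 * driver strategy routine.
 */
#if 0
static void
raw_io_example(struct buf *bp, void (*dev_strategy)(struct buf *))
{
	vmapbuf(bp, bp->b_bcount);	/* bp->b_data now points into phys_map */
	(*dev_strategy)(bp);		/* device works on the kernel mapping */
	biowait(bp);			/* wait for the transfer to complete */
	vunmapbuf(bp, bp->b_bcount);	/* restore bp->b_data, free the KVA */
}
#endif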

/* End of vm_machdep.c */