/*	$NetBSD: vm_machdep.c,v 1.16 2002/04/03 23:33:29 thorpej Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * vm_machdep.c
 *
 * vm machine specific bits
 *
 * Created      : 08/10/94
 */

#include "opt_armfpe.h"
#include "opt_pmap_debug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/user.h>
#include <sys/exec.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/pmap.h>
#include <machine/reg.h>
#include <machine/vmparam.h>

#ifdef ARMFPE
#include <arm/fpe-arm/armfpe.h>
#endif

extern pv_addr_t systempage;

int process_read_regs	__P((struct proc *p, struct reg *regs));
int process_read_fpregs	__P((struct proc *p, struct fpreg *regs));

void	switch_exit	__P((struct proc *p));
extern void proc_trampoline	__P((void));

/*
 * Special compilation symbols:
 *
 * STACKCHECKS - Fill undefined and supervisor stacks with a known pattern
 *		 on forking and check the pattern on exit, reporting
 *		 the amount of stack used.
 */

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * proc_trampoline() and call child_return() with p2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * p1 is the process being forked; if p1 == &proc0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_fork(p1, p2, stack, stacksize, func, arg)
	struct proc *p1;
	struct proc *p2;
	void *stack;
	size_t stacksize;
	void (*func) __P((void *));
	void *arg;
{
	struct pcb *pcb = (struct pcb *)&p2->p_addr->u_pcb;
	struct trapframe *tf;
	struct switchframe *sf;

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("cpu_fork: %p %p %p %p\n", p1, p2, curproc, &proc0);
#endif	/* PMAP_DEBUG */

#if 0 /* XXX */
	if (p1 == curproc) {
		/* Sync the PCB before we copy it. */
		savectx(curpcb);
	}
#endif

	/* Copy the pcb */
	*pcb = p1->p_addr->u_pcb;

	/*
	 * Set up the undefined stack for the process.
	 * Note: this stack is not in use if we are forking from p1
	 */
	pcb->pcb_un.un_32.pcb32_und_sp = (u_int)p2->p_addr +
	    USPACE_UNDEF_STACK_TOP;
	pcb->pcb_un.un_32.pcb32_sp = (u_int)p2->p_addr + USPACE_SVC_STACK_TOP;

#ifdef STACKCHECKS
	/* Fill the undefined stack with a known pattern */
	memset(((u_char *)p2->p_addr) + USPACE_UNDEF_STACK_BOTTOM, 0xdd,
	    (USPACE_UNDEF_STACK_TOP - USPACE_UNDEF_STACK_BOTTOM));
	/* Fill the kernel stack with a known pattern */
	memset(((u_char *)p2->p_addr) + USPACE_SVC_STACK_BOTTOM, 0xdd,
	    (USPACE_SVC_STACK_TOP - USPACE_SVC_STACK_BOTTOM));
#endif	/* STACKCHECKS */

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0) {
		printf("p1->procaddr=%p p1->procaddr->u_pcb=%p pid=%d pmap=%p\n",
		    p1->p_addr, &p1->p_addr->u_pcb, p1->p_pid,
		    p1->p_vmspace->vm_map.pmap);
		printf("p2->procaddr=%p p2->procaddr->u_pcb=%p pid=%d pmap=%p\n",
		    p2->p_addr, &p2->p_addr->u_pcb, p2->p_pid,
		    p2->p_vmspace->vm_map.pmap);
	}
#endif	/* PMAP_DEBUG */

	pmap_activate(p2);

#ifdef ARMFPE
	/* Initialise a new FP context for p2 and copy the context from p1 */
	arm_fpe_core_initcontext(FP_CONTEXT(p2));
	arm_fpe_copycontext(FP_CONTEXT(p1), FP_CONTEXT(p2));
#endif	/* ARMFPE */

	p2->p_addr->u_pcb.pcb_tf = tf =
	    (struct trapframe *)pcb->pcb_un.un_32.pcb32_sp - 1;
	*tf = *p1->p_addr->u_pcb.pcb_tf;

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_usr_sp = (u_int)stack + stacksize;

	/*
	 * Build a switchframe on top of the trapframe; when cpu_switch()
	 * resumes the child it will start in proc_trampoline(), which
	 * calls func (sf_r4) with arg (sf_r5) as its argument.
	 */
	sf = (struct switchframe *)tf - 1;
	sf->sf_spl = 0;		/* always equivalent to spl0() */
	sf->sf_r4 = (u_int)func;
	sf->sf_r5 = (u_int)arg;
	sf->sf_pc = (u_int)proc_trampoline;
	pcb->pcb_un.un_32.pcb32_sp = (u_int)sf;
}
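
/*
 * Illustrative sketch (not part of the original file): how the frames
 * built above are consumed.  A kernel thread is created roughly via
 * kthread_create1() -> fork1() -> cpu_fork(&proc0, p2, NULL, 0, func,
 * arg), after which the child starts in proc_trampoline() and runs as
 * if func(arg) had been called.  The names below are hypothetical.
 */
#if 0
static void
example_kthread_main(void *arg)		/* entered via proc_trampoline() */
{

	for (;;) {
		/* ... do the thread's work on `arg', then sleep ... */
		(void)tsleep(arg, PWAIT, "exwork", 0);
	}
	/* NOTREACHED */
}
#endif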

/*
 * cpu_exit is called as the last action during exit.
 *
 * We clean up a little and then call switch_exit() with the old proc as an
 * argument.  switch_exit() first switches to proc0's context, and finally
 * jumps into cpu_switch() to wait for another process to wake up.
 */

void
cpu_exit(p)
	register struct proc *p;
{
#ifdef ARMFPE
	/* Abort any active FP operation and deactivate the context */
	arm_fpe_core_abort(FP_CONTEXT(p), NULL, NULL);
	arm_fpe_core_changecontext(0);
#endif	/* ARMFPE */

#ifdef STACKCHECKS
	/* Report how much stack has been used - debugging */
	if (p) {
		u_char *ptr;
		int loop;

		ptr = ((u_char *)p->p_addr) + USPACE_UNDEF_STACK_BOTTOM;
		for (loop = 0; loop < (USPACE_UNDEF_STACK_TOP - USPACE_UNDEF_STACK_BOTTOM)
		    && *ptr == 0xdd; ++loop, ++ptr) ;
		log(LOG_INFO, "%d bytes of undefined stack fill pattern\n", loop);
		ptr = ((u_char *)p->p_addr) + USPACE_SVC_STACK_BOTTOM;
		for (loop = 0; loop < (USPACE_SVC_STACK_TOP - USPACE_SVC_STACK_BOTTOM)
		    && *ptr == 0xdd; ++loop, ++ptr) ;
		log(LOG_INFO, "%d bytes of svc stack fill pattern\n", loop);
	}
#endif	/* STACKCHECKS */
	uvmexp.swtch++;
	switch_exit(p);
}


void
cpu_swapin(p)
	struct proc *p;
{
#if 0
	/* Don't do this.  See the comment in cpu_swapout().  */
#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("cpu_swapin(%p, %d, %s, %p)\n", p, p->p_pid,
		    p->p_comm, p->p_vmspace->vm_map.pmap);
#endif	/* PMAP_DEBUG */

	if (vector_page < KERNEL_BASE) {
		/* Map the vector page */
		pmap_enter(p->p_vmspace->vm_map.pmap, vector_page,
		    systempage.pv_pa, VM_PROT_READ, VM_PROT_READ|PMAP_WIRED);
		pmap_update(p->p_vmspace->vm_map.pmap);
	}
#endif
}


void
cpu_swapout(p)
	struct proc *p;
{
#if 0
	/*
	 * Don't do this!  If the pmap is shared with another process,
	 * it will lose its page0 entry.  That's bad news indeed.
	 */
#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("cpu_swapout(%p, %d, %s, %p)\n", p, p->p_pid,
		    p->p_comm, &p->p_vmspace->vm_map.pmap);
#endif	/* PMAP_DEBUG */

	if (vector_page < KERNEL_BASE) {
		/* Free the system page mapping */
		pmap_remove(p->p_vmspace->vm_map.pmap, vector_page,
		    vector_page + NBPG);
		pmap_update(p->p_vmspace->vm_map.pmap);
	}
#endif
}


/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of NBPG.
 */

void
pagemove(from, to, size)
	caddr_t from, to;
	size_t size;
{
	register pt_entry_t *fpte, *tpte;

	if (size % NBPG)
		panic("pagemove: size=%08lx", (u_long) size);

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("pagemove: V%p to %p size %08lx\n",
		    from, to, (u_long) size);
#endif	/* PMAP_DEBUG */

	fpte = vtopte((vaddr_t)from);
	tpte = vtopte((vaddr_t)to);

	/*
	 * Make sure the cache does not have dirty data for the
	 * pages we are moving. Pages in the buffers are only
	 * ever moved with pagemove, so we only need to clean
	 * the 'from' area.
	 */

	cpu_dcache_wbinv_range((vaddr_t) from, size);

	while (size > 0) {
		*tpte++ = *fpte;
		*fpte++ = 0;
		size -= NBPG;
	}
	/* cpu_tlb_flushD(); */
}
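
/*
 * Illustrative sketch (an assumption, not from this file): the buffer
 * cache is the expected caller of pagemove(), e.g. when allocbuf()
 * grows one buffer by stealing mapped pages from another.  The call
 * below is a hypothetical shape of such a transfer of `bytes' worth
 * of KVA mappings; all variable names are invented.
 */
#if 0
	pagemove(donorbp->b_data + donorbp->b_bufsize - bytes,
	    bp->b_data + bp->b_bufsize, bytes);
	bp->b_bufsize += bytes;
	donorbp->b_bufsize -= bytes;
#endif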

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
void
vmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("vmapbuf: bp=%08x buf=%08x len=%08x\n", (u_int)bp,
		    (u_int)bp->b_data, (u_int)len);
#endif	/* PMAP_DEBUG */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/* Save the user address and carve out a kernel VA window for it. */
	faddr = trunc_page((vaddr_t)bp->b_saveaddr = bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_valloc_wait(phys_map, len);
	bp->b_data = (caddr_t)(taddr + off);

	/*
	 * The region is locked, so we expect that pmap_extract() will
	 * always succeed for these addresses.
	 */
	while (len) {
		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &fpa);
		pmap_enter(pmap_kernel(), taddr, fpa,
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
	vaddr_t addr, off;

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("vunmapbuf: bp=%08x buf=%08x len=%08x\n",
		    (u_int)bp, (u_int)bp->b_data, (u_int)len);
#endif	/* PMAP_DEBUG */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	/*
	 * Make sure the cache does not have dirty data for the
	 * pages we had mapped.
	 */
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);

	pmap_remove(pmap_kernel(), addr, addr + len);
	pmap_update(pmap_kernel());
	uvm_km_free_wakeup(phys_map, addr, len);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}
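
/*
 * Illustrative sketch (an assumption, not from this file): vmapbuf()
 * and vunmapbuf() bracket raw device I/O done through physio(), along
 * the lines of the hypothetical fragment below; `strategy' is an
 * invented function-pointer name.
 */
#if 0
	vmapbuf(bp, bp->b_bcount);	/* double-map user pages into phys_map */
	(*strategy)(bp);		/* device works on the kernel mapping */
	(void)biowait(bp);
	vunmapbuf(bp, bp->b_bcount);	/* release the kernel mapping */
#endif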

/* End of vm_machdep.c */