/*	$NetBSD: vm_machdep.c,v 1.3 2001/08/11 12:57:25 chris Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * vm_machdep.c
 *
 * vm machine specific bits
 *
 * Created      : 08/10/94
 */

#include "opt_armfpe.h"
#include "opt_pmap_debug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/user.h>
#include <sys/exec.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/pmap.h>
#include <machine/reg.h>
#include <machine/vmparam.h>

#ifdef ARMFPE
#include <arm32/fpe-arm/armfpe.h>
#endif

extern pv_addr_t systempage;

int process_read_regs	__P((struct proc *p, struct reg *regs));
int process_read_fpregs	__P((struct proc *p, struct fpreg *regs));

void	switch_exit	__P((struct proc *p, struct proc *proc0));
extern void proc_trampoline	__P((void));

/*
 * Special compilation symbols:
 *
 * STACKCHECKS - Fill undefined and supervisor stacks with a known pattern
 *		 on forking and check the pattern on exit, reporting
 *		 the amount of stack used.
 */

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * proc_trampoline() and call child_return() with p2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * p1 is the process being forked; if p1 == &proc0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_fork(p1, p2, stack, stacksize, func, arg)
	struct proc *p1;
	struct proc *p2;
	void *stack;
	size_t stacksize;
	void (*func) __P((void *));
	void *arg;
{
	struct pcb *pcb = &p2->p_addr->u_pcb;
	struct trapframe *tf;
	struct switchframe *sf;

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("cpu_fork: %p %p %p %p\n", p1, p2, curproc, &proc0);
#endif	/* PMAP_DEBUG */

#if 0 /* XXX */
	if (p1 == curproc) {
		/* Sync the PCB before we copy it. */
		savectx(curpcb);
	}
#endif

	/* Copy the pcb */
	*pcb = p1->p_addr->u_pcb;

	/*
	 * Set up the undefined and supervisor stack pointers for the
	 * process.  Note: these stacks are not yet in use if we are
	 * forking from p1.
	 */
	pcb->pcb_und_sp = (u_int)p2->p_addr + USPACE_UNDEF_STACK_TOP;
	pcb->pcb_sp = (u_int)p2->p_addr + USPACE_SVC_STACK_TOP;

#ifdef STACKCHECKS
	/* Fill the undefined stack with a known pattern */
	memset(((u_char *)p2->p_addr) + USPACE_UNDEF_STACK_BOTTOM, 0xdd,
	    (USPACE_UNDEF_STACK_TOP - USPACE_UNDEF_STACK_BOTTOM));
	/* Fill the kernel stack with a known pattern */
	memset(((u_char *)p2->p_addr) + USPACE_SVC_STACK_BOTTOM, 0xdd,
	    (USPACE_SVC_STACK_TOP - USPACE_SVC_STACK_BOTTOM));
#endif	/* STACKCHECKS */

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0) {
		printf("p1->procaddr=%p p1->procaddr->u_pcb=%p pid=%d pmap=%p\n",
		    p1->p_addr, &p1->p_addr->u_pcb, p1->p_pid,
		    p1->p_vmspace->vm_map.pmap);
		printf("p2->procaddr=%p p2->procaddr->u_pcb=%p pid=%d pmap=%p\n",
		    p2->p_addr, &p2->p_addr->u_pcb, p2->p_pid,
		    p2->p_vmspace->vm_map.pmap);
	}
#endif	/* PMAP_DEBUG */

	pmap_activate(p2);

#ifdef ARMFPE
	/* Initialise a new FP context for p2 and copy the context from p1 */
	arm_fpe_core_initcontext(FP_CONTEXT(p2));
	arm_fpe_copycontext(FP_CONTEXT(p1), FP_CONTEXT(p2));
#endif	/* ARMFPE */

	/* Build the child's trapframe at the top of its kernel stack. */
	p2->p_addr->u_pcb.pcb_tf = tf = (struct trapframe *)pcb->pcb_sp - 1;
	*tf = *p1->p_addr->u_pcb.pcb_tf;

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_usr_sp = (u_int)stack + stacksize;

	/* Build the switchframe that cpu_switch() will resume from. */
	sf = (struct switchframe *)tf - 1;
	sf->sf_spl = _SPL_0;
	sf->sf_r4 = (u_int)func;
	sf->sf_r5 = (u_int)arg;
	sf->sf_pc = (u_int)proc_trampoline;
	pcb->pcb_sp = (u_int)sf;
}
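
/*
 * Illustrative sketch (not taken from the kernel sources): when
 * cpu_switch() first selects the child, it restores r4/r5 from the
 * switchframe and jumps to sf_pc.  proc_trampoline itself is
 * assembly, but a rough C equivalent of its effect, assuming r4/r5
 * arrive as the two arguments, is:
 *
 *	void
 *	proc_trampoline_sketch(void (*func)(void *), void *arg)
 *	{
 *		(*func)(arg);	(child_return(p2) for a normal fork)
 *	}
 *
 * after which the child drops to user mode through the trapframe
 * built above.  The resulting kernel stack layout, top down, is:
 *
 *	USPACE_SVC_STACK_TOP
 *	    struct trapframe	(the child's user-mode registers)
 *	    struct switchframe	(sf_pc = proc_trampoline)
 *	pcb->pcb_sp
 */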

/*
 * cpu_exit is called as the last action during exit.
 *
 * We clean up a little and then call switch_exit() with the old proc as an
 * argument.  switch_exit() first switches to proc0's context, and finally
 * jumps into switch() to wait for another process to wake up.
 */

void
cpu_exit(p)
	register struct proc *p;
{
#ifdef ARMFPE
	/* Abort any active FP operation and deactivate the context */
	arm_fpe_core_abort(FP_CONTEXT(p), NULL, NULL);
	arm_fpe_core_changecontext(0);
#endif	/* ARMFPE */

#ifdef STACKCHECKS
	/* Report how much stack has been used - debugging */
	if (p) {
		u_char *ptr;
		int loop;

		ptr = ((u_char *)p->p_addr) + USPACE_UNDEF_STACK_BOTTOM;
		for (loop = 0; loop < (USPACE_UNDEF_STACK_TOP - USPACE_UNDEF_STACK_BOTTOM)
		    && *ptr == 0xdd; ++loop, ++ptr) ;
		log(LOG_INFO, "%d bytes of undefined stack fill pattern\n", loop);
		ptr = ((u_char *)p->p_addr) + USPACE_SVC_STACK_BOTTOM;
		for (loop = 0; loop < (USPACE_SVC_STACK_TOP - USPACE_SVC_STACK_BOTTOM)
		    && *ptr == 0xdd; ++loop, ++ptr) ;
		log(LOG_INFO, "%d bytes of svc stack fill pattern\n", loop);
	}
#endif	/* STACKCHECKS */
	uvmexp.swtch++;
	switch_exit(p, &proc0);
}


void
cpu_swapin(p)
	struct proc *p;
{

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("cpu_swapin(%p, %d, %s, %p)\n", p, p->p_pid,
		    p->p_comm, p->p_vmspace->vm_map.pmap);
#endif	/* PMAP_DEBUG */

	/* Map the system page */
	pmap_enter(p->p_vmspace->vm_map.pmap, 0x00000000, systempage.pv_pa,
	    VM_PROT_READ, VM_PROT_READ|PMAP_WIRED);
	pmap_update();
}
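
/*
 * The system page holds the ARM exception vectors, which is why it
 * must appear read-only at virtual address zero in the process's
 * address space while the process can run; cpu_swapout() below
 * removes the mapping again when the process is swapped out.
 */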


void
cpu_swapout(p)
	struct proc *p;
{

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("cpu_swapout(%p, %d, %s, %p)\n", p, p->p_pid,
		    p->p_comm, p->p_vmspace->vm_map.pmap);
#endif	/* PMAP_DEBUG */

	/* Free the system page mapping */
	pmap_remove(p->p_vmspace->vm_map.pmap, 0x00000000, 0x00000000 + NBPG);
	pmap_update();
}


/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of NBPG, the machine page size.
 */

void
pagemove(from, to, size)
	caddr_t from, to;
	size_t size;
{
	register pt_entry_t *fpte, *tpte;

	if (size % NBPG)
		panic("pagemove: size=%08lx", (u_long) size);

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("pagemove: V%p to %p size %08lx\n",
		    from, to, (u_long) size);
#endif	/* PMAP_DEBUG */

	fpte = vtopte((vaddr_t)from);
	tpte = vtopte((vaddr_t)to);

	/*
	 * Make sure the cache does not have dirty data for the
	 * pages we are moving. Pages in the buffers are only
	 * ever moved with pagemove, so we only need to clean
	 * the 'from' area.
	 */

	cpu_cache_purgeD_rng((u_int)from, size);

	/* Move the mappings by copying each PTE and zapping the old one. */
	while (size > 0) {
		*tpte++ = *fpte;
		*fpte++ = 0;
		size -= NBPG;
	}
	/* cpu_tlb_flushD(); */
}
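
/*
 * Usage sketch (an assumed caller, not code from this file): the
 * 4.4BSD-style buffer cache resizes buffers by shuffling whole pages
 * between buffers' kernel addresses instead of copying data, e.g.
 *
 *	pagemove(oldaddr, newaddr, npages * NBPG);
 *
 * Only the PTEs move, so the data itself is never touched; the cache
 * clean above keeps the move coherent on the virtually-cached ARM.
 */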

extern struct vm_map *phys_map;

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
void
vmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("vmapbuf: bp=%08x buf=%08x len=%08x\n", (u_int)bp,
		    (u_int)bp->b_data, (u_int)len);
#endif	/* PMAP_DEBUG */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/* Allocate kernel virtual space for the mapping. */
	taddr = uvm_km_valloc_wait(phys_map, len);

	/* Save the user address and compute the page-aligned range. */
	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	bp->b_data = (caddr_t)(taddr + off);

	/*
	 * The region is locked, so pmap_extract() is expected to find
	 * a valid mapping for every page.
	 */
	while (len) {
		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &fpa);
		pmap_kenter_pa(taddr, fpa, VM_PROT_READ|VM_PROT_WRITE);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update();
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
	vaddr_t addr, off;

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("vunmapbuf: bp=%08x buf=%08x len=%08x\n",
		    (u_int)bp, (u_int)bp->b_data, (u_int)len);
#endif	/* PMAP_DEBUG */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	/*
	 * Make sure the cache does not have dirty data for the
	 * pages we had mapped.
	 */
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);
	pmap_kremove(addr, len);
	pmap_update();
	uvm_km_free_wakeup(phys_map, addr, len);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
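
/*
 * Typical calling sequence (a hedged sketch of physio-style use of
 * this interface; the actual caller code is not part of this file):
 *
 *	uvm_vslock(bp->b_proc, bp->b_data, len, prot);
 *	vmapbuf(bp, len);
 *		... perform the transfer through bp->b_data,
 *		    which now points into kernel VA space ...
 *	vunmapbuf(bp, len);
 *	uvm_vsunlock(bp->b_proc, bp->b_data, len);
 *
 * vmapbuf() stashes the original user address in bp->b_saveaddr, and
 * vunmapbuf() restores it to bp->b_data before returning.
 */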

/* End of vm_machdep.c */