/*	$NetBSD: machdep.c,v 1.2 1999/09/14 11:21:27 tsubai Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include "opt_compat_netbsd.h"
#include "opt_ddb.h"
#include "opt_memsize.h"
#include "opt_initbsc.h"
#include "opt_sysv.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/map.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/exec.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/device.h>
#include <sys/extent.h>
#include <sys/syscallargs.h>

#ifdef KGDB
#include <sys/kgdb.h>
#endif

#include <dev/cons.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <uvm/uvm_extern.h>

#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/psl.h>
#include <machine/bootinfo.h>
#include <machine/bus.h>
#include <sh3/bscreg.h>
#include <sh3/ccrreg.h>
#include <sh3/cpgreg.h>
#include <sh3/intcreg.h>
#include <sh3/pfcreg.h>
#include <sh3/wdtreg.h>

#include <sys/termios.h>
#include "sci.h"

/* the following is used externally (sysctl_hw) */
char machine[] = MACHINE;		/* cpu "architecture" */
char machine_arch[] = MACHINE_ARCH;	/* machine_arch = "sh3" */

#ifdef sh3_debug
int cpu_debug_mode = 1;
#else
int cpu_debug_mode = 0;
#endif

char cpu_model[120];

char bootinfo[BOOTINFO_MAXSIZE];

int	physmem;
int	dumpmem_low;
int	dumpmem_high;
extern int	boothowto;
int	cpu_class;

paddr_t msgbuf_paddr;

vm_map_t exec_map = NULL;
vm_map_t mb_map = NULL;
vm_map_t phys_map = NULL;

extern paddr_t avail_start, avail_end;
extern u_long atdevbase;
extern int etext,_start;

#ifdef	SYSCALL_DEBUG
#define	SCDEBUG_ALL 0x0004
extern int	scdebug;
#endif

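/*
 * IOM_RAM_END is the last byte of on-board RAM.  IOM_RAM_BEGIN and
 * IOM_RAM_SIZE are assumed to come from the board's memory-size options
 * ("opt_memsize.h", included above); this is an inference, not verified here.
 */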
#define IOM_RAM_END	((paddr_t)IOM_RAM_BEGIN + IOM_RAM_SIZE - 1)

/*
 * Extent maps to manage I/O and ISA memory hole space.  Allocate
 * storage for 8 regions in each, initially.  Later, ioport_malloc_safe
 * will indicate that it's safe to use malloc() to dynamically allocate
 * region descriptors.
 *
 * N.B. At least two regions are _always_ allocated from the iomem
 * extent map; (0 -> ISA hole) and (end of ISA hole -> end of RAM).
 *
 * The extent maps are not static!  Machine-dependent ISA and EISA
 * routines need access to them for bus address space allocation.
 */
static	long iomem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
struct	extent *ioport_ex;
struct	extent *iomem_ex;
static	int ioport_malloc_safe;

void	setup_bootinfo __P((void));
void	dumpsys __P((void));
void	identifycpu __P((void));
void	initSH3 __P((vaddr_t));
void	InitializeSci  __P((unsigned char));
void	Send16550 __P((int c));
void	Init16550 __P((void));
void	sh3_cache_on __P((void));
void	LoadAndReset __P((char *osimage));
void	XLoadAndReset __P((char *osimage));
void	Sh3Reset __P((void));

#include <dev/ic/comreg.h>
#include <dev/ic/comvar.h>

void	consinit __P((void));

#ifdef COMPAT_NOMID
static int exec_nomid	__P((struct proc *, struct exec_package *));
#endif



/*
 * Machine-dependent startup code
 *
 * This is called from main() in kern/main.c.
 */
void
cpu_startup()
{
	unsigned i;
	caddr_t v;
	int sz;
	int base, residual;
	vaddr_t minaddr, maxaddr;
	vsize_t size;
	struct pcb *pcb;
	char pbuf[9];
	/* int x; */

	printf(version);

	sprintf(cpu_model, "Hitachi SH3");

	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	sz = (int)allocsys(NULL, NULL);
	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v, NULL) - v != sz)
		panic("startup: table size inconsistency");

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	buffers = 0;
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
		    NULL, UVM_UNKNOWN_OFFSET,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
				UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
		panic("cpu_startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;

	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		/* don't want to alloc more physical mem than needed */
		bufpages = btoc(MAXBSIZE) * nbuf;
	}

	base = bufpages / nbuf;
	residual = bufpages % nbuf;

	for (i = 0; i < nbuf; i++) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
		curbufsize = CLBYTES * ((i < residual) ? (base+1) : base);

		while (curbufsize) {
			/*
			 * Attempt to allocate buffers from the first
			 * 16M of RAM to avoid bouncing file system
			 * transfers.
			 */
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("cpu_startup: not enough memory for "
				    "buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
					VM_PROT_READ|VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}
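	/*
	 * Every buffer-cache page above was entered with pmap_kenter_pa(),
	 * i.e. as an unmanaged, wired mapping, so the buffer cache itself
	 * is never paged out.
	 */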

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   16*NCARGS, TRUE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   VM_PHYS_SIZE, TRUE, FALSE, NULL);

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_MBUF_SIZE, FALSE, FALSE, NULL);

	/*
	 * Initialize callouts
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];
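	/*
	 * The loop above chains callout[0] .. callout[ncallout-2] into a
	 * singly linked free list headed by callfree; the old-style
	 * timeout/untimeout code allocates entries from that list.
	 */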

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * CLBYTES);
	printf("using %d buffers containing %s of memory\n", nbuf, pbuf);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/* Safe for i/o port allocation to use malloc now. */
	ioport_malloc_safe = 1;

	curpcb = pcb = &proc0.p_addr->u_pcb;
	pcb->r15 = (int)proc0.p_addr + USPACE - 16;

	proc0.p_md.md_regs = (struct trapframe *)pcb->r15 - 1;

#ifdef SYSCALL_DEBUG
	scdebug |= SCDEBUG_ALL;
#endif

#if 0
	boothowto |= RB_SINGLE;
#endif
}

/*
 * Info for CTL_HW
 */
extern	char version[];

#define CPUDEBUG

/*
 * machine dependent system variables.
 */
int
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	dev_t consdev;
	struct btinfo_bootpath *bibp;
	struct trapframe *tf;
	char *osimage;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case CPU_CONSDEV:
		if (cn_tab != NULL)
			consdev = cn_tab->cn_dev;
		else
			consdev = NODEV;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
		    sizeof consdev));

	case CPU_NKPDE:
		return (sysctl_rdint(oldp, oldlenp, newp, nkpde));

	case CPU_BOOTED_KERNEL:
	        bibp = lookup_bootinfo(BTINFO_BOOTPATH);
	        if (!bibp)
			return (ENOENT); /* ??? */
		return (sysctl_rdstring(oldp, oldlenp, newp, bibp->bootpath));

	case CPU_SETPRIVPROC:
		if (newp == NULL)
			return (0);

		/* set current process to privileged process */
		tf = p->p_md.md_regs;
		tf->tf_ssr |= PSL_MD;
		return (0);

	case CPU_DEBUGMODE:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
				   &cpu_debug_mode));

	case CPU_LOADANDRESET:
		if (newp != NULL) {
			osimage = (char *)(*(u_long *)newp);

			LoadAndReset(osimage);
			/* NOTREACHED */
		}
		return (0);

	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
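
/*
 * A minimal userland sketch of querying one of these nodes (the console
 * device); the names are the standard <sys/sysctl.h>/<machine/cpu.h> ones
 * and error handling is trimmed:
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_CONSDEV };
 *	dev_t cdev;
 *	size_t len = sizeof(cdev);
 *
 *	if (sysctl(mib, 2, &cdev, &len, NULL, 0) == -1)
 *		err(1, "sysctl machdep.consdev");
 */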

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct proc *p = curproc;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp = p->p_sigacts;
	int onstack;

	tf = p->p_md.md_regs;

	/* Do we need to jump onto the signal stack? */
	onstack =
	    (psp->ps_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (psp->ps_sigact[sig].sa_flags & SA_ONSTACK) != 0;

	/* Allocate space for the signal handler context. */
	if (onstack)
		fp = (struct sigframe *)((caddr_t)psp->ps_sigstk.ss_sp +
						  psp->ps_sigstk.ss_size);
	else
		fp = (struct sigframe *)tf->tf_r15;
	fp--;
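	/*
	 * The decrement reserves space for one struct sigframe just below
	 * the chosen stack top; the completed frame is copied out to that
	 * address below.
	 */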

	/* Build stack frame for signal trampoline. */
	frame.sf_signum = sig;
	frame.sf_code = code;
	frame.sf_scp = &fp->sf_sc;
	frame.sf_handler = catcher;

	/* Save register context. */
	frame.sf_sc.sc_ssr = tf->tf_ssr;
	frame.sf_sc.sc_spc = tf->tf_spc;
	frame.sf_sc.sc_pr = tf->tf_pr;
	frame.sf_sc.sc_r15 = tf->tf_r15;
	frame.sf_sc.sc_r14 = tf->tf_r14;
	frame.sf_sc.sc_r13 = tf->tf_r13;
	frame.sf_sc.sc_r12 = tf->tf_r12;
	frame.sf_sc.sc_r11 = tf->tf_r11;
	frame.sf_sc.sc_r10 = tf->tf_r10;
	frame.sf_sc.sc_r9 = tf->tf_r9;
	frame.sf_sc.sc_r8 = tf->tf_r8;
	frame.sf_sc.sc_r7 = tf->tf_r7;
	frame.sf_sc.sc_r6 = tf->tf_r6;
	frame.sf_sc.sc_r5 = tf->tf_r5;
	frame.sf_sc.sc_r4 = tf->tf_r4;
	frame.sf_sc.sc_r3 = tf->tf_r3;
	frame.sf_sc.sc_r2 = tf->tf_r2;
	frame.sf_sc.sc_r1 = tf->tf_r1;
	frame.sf_sc.sc_r0 = tf->tf_r0;
	frame.sf_sc.sc_trapno = tf->tf_trapno;
#ifdef TODO
	frame.sf_sc.sc_err = tf->tf_err;
#endif

	/* Save signal stack. */
	frame.sf_sc.sc_onstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;

	/* Save signal mask. */
	frame.sf_sc.sc_mask = *mask;

#ifdef COMPAT_13
	/*
	 * XXX We always have to save an old style signal mask because
	 * XXX we might be delivering a signal to a process which will
	 * XXX escape from the signal in a non-standard way and invoke
	 * XXX sigreturn() directly.
	 */
	native_sigset_to_sigset13(mask, &frame.sf_sc.__sc_mask13);
#endif

	if (copyout(&frame, fp, sizeof(frame)) != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Build context to run handler in.
	 */
	tf->tf_spc = (int)psp->ps_sigcode;
#ifdef TODO
	tf->tf_ssr &= ~(PSL_T|PSL_VM|PSL_AC);
#endif
	tf->tf_r15 = (int)fp;

	/* Remember that we're now on the signal stack. */
	if (onstack)
		psp->ps_sigstk.ss_flags |= SS_ONSTACK;
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
int
sys___sigreturn14(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys___sigreturn14_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct sigcontext *scp, context;
	struct trapframe *tf;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sigcntxp);
	if (copyin((caddr_t)scp, &context, sizeof(*scp)) != 0)
		return (EFAULT);

	/* Restore signal context. */
	tf = p->p_md.md_regs;
	{
		/*
		 * Check for security violations.  If we're returning to
		 * protected mode, the CPU will validate the segment registers
		 * automatically and generate a trap on violations.  We handle
		 * the trap, rather than doing all of the checking here.
		 */
#ifdef TODO
	  if (((context.sc_ssr ^ tf->tf_ssr) & PSL_USERSTATIC) != 0) {
	    return (EINVAL);
	  }
#endif

	  tf->tf_ssr = context.sc_ssr;
	}
	tf->tf_r0 = context.sc_r0;
	tf->tf_r1 = context.sc_r1;
	tf->tf_r2 = context.sc_r2;
	tf->tf_r3 = context.sc_r3;
	tf->tf_r4 = context.sc_r4;
	tf->tf_r5 = context.sc_r5;
	tf->tf_r6 = context.sc_r6;
	tf->tf_r7 = context.sc_r7;
	tf->tf_r8 = context.sc_r8;
	tf->tf_r9 = context.sc_r9;
	tf->tf_r10 = context.sc_r10;
	tf->tf_r11 = context.sc_r11;
	tf->tf_r12 = context.sc_r12;
	tf->tf_r13 = context.sc_r13;
	tf->tf_r14 = context.sc_r14;
	tf->tf_spc = context.sc_spc;
	tf->tf_r15 = context.sc_r15;
	tf->tf_pr = context.sc_pr;

	/* Restore signal stack. */
	if (context.sc_onstack & SS_ONSTACK)
		p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK;
	/* Restore signal mask. */
	(void) sigprocmask1(p, SIG_SETMASK, &context.sc_mask, 0);

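	/*
	 * EJUSTRETURN tells the syscall return path not to modify the
	 * trapframe (no error code, no return value); the registers
	 * restored above are exactly what the process resumes with.
	 */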
	return (EJUSTRETURN);
}

int	waittime = -1;
struct pcb dumppcb;

void
cpu_reboot(howto, bootstr)
	int howto;
	char *bootstr;
{
	extern int cold;

	if (cold) {
		howto |= RB_HALT;
		goto haltsys;
	}

	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;
		vfs_shutdown();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		/* resettodr(); */
	}

	/* Disable interrupts. */
	splhigh();

	/* Do a dump if requested. */
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
		dumpsys();

haltsys:
	doshutdownhooks();

	if (howto & RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
	}

	printf("rebooting...\n");
	cpu_reset();
	for(;;)
		;
	/*NOTREACHED*/
}

/*
 * These variables are needed by /sbin/savecore
 */
u_long	dumpmag = 0x8fca0101;	/* magic number */
int 	dumpsize = 0;		/* pages */
long	dumplo = 0; 		/* blocks */

/*
 * This is called by main to set dumplo and dumpsize.
 * Dumps always skip the first CLBYTES of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
void
cpu_dumpconf()
{
#ifdef	TODO
	int nblks;	/* size of dump area */
	int maj;

	if (dumpdev == NODEV)
		return;
	maj = major(dumpdev);
	if (maj < 0 || maj >= nblkdev)
		panic("dumpconf: bad dumpdev=0x%x", dumpdev);
	if (bdevsw[maj].d_psize == NULL)
		return;
	nblks = (*bdevsw[maj].d_psize)(dumpdev);
	if (nblks <= ctod(1))
		return;

	dumpsize = btoc(IOM_END + ctob(dumpmem_high));

	/* Always skip the first CLBYTES, in case there is a label there. */
	if (dumplo < ctod(1))
		dumplo = ctod(1);

	/* Put dump at end of partition, and make it fit. */
	if (dumpsize > dtoc(nblks - dumplo))
		dumpsize = dtoc(nblks - dumplo);
	if (dumplo < nblks - ctod(dumpsize))
		dumplo = nblks - ctod(dumpsize);
#endif
}

/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.
 */
#define BYTES_PER_DUMP  NBPG	/* must be a multiple of pagesize XXX small */
static vaddr_t dumpspace;

vaddr_t
reserve_dumppages(p)
	vaddr_t p;
{

	dumpspace = p;
	return (p + BYTES_PER_DUMP);
}

void
dumpsys()
{
#ifdef	TODO
	unsigned bytes, i, n;
	int maddr, psize;
	daddr_t blkno;
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	int error;

	/* Save registers. */
	savectx(&dumppcb);

	msgbufmapped = 0;	/* don't record dump msgs in msgbuf */
	if (dumpdev == NODEV)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if the dump device has already been configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo < 0)
		return;
	printf("\ndumping to dev %x, offset %ld\n", dumpdev, dumplo);

	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}

#if 0	/* XXX this doesn't work.  grr. */
        /* toss any characters present prior to dump */
	while (sget() != NULL); /*syscons and pccons differ */
#endif

	bytes = ctob(dumpmem_high) + IOM_END;
	maddr = 0;
	blkno = dumplo;
	dump = bdevsw[major(dumpdev)].d_dump;
	error = 0;
	for (i = 0; i < bytes; i += n) {
		/*
		 * Avoid dumping the ISA memory hole, and areas that
		 * BIOS claims aren't in low memory.
		 */
		if (i >= ctob(dumpmem_low) && i < IOM_END) {
			n = IOM_END - i;
			maddr += n;
			blkno += btodb(n);
			continue;
		}

		/* Print out how many MBs we have to go. */
		n = bytes - i;
		if (n && (n % (1024*1024)) == 0)
			printf("%d ", n / (1024 * 1024));

		/* Limit size for next transfer. */
		if (n > BYTES_PER_DUMP)
			n =  BYTES_PER_DUMP;

		(void) pmap_map(dumpspace, maddr, maddr + n, VM_PROT_READ);
		error = (*dump)(dumpdev, blkno, (caddr_t)dumpspace, n);
		if (error)
			break;
		maddr += n;
		blkno += btodb(n);			/* XXX? */

#if 0	/* XXX this doesn't work.  grr. */
		/* operator aborting dump? */
		if (sget() != NULL) {
			error = EINTR;
			break;
		}
#endif
	}

	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case EINTR:
		printf("aborted from console\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
	printf("\n\n");
	delay(5000000);		/* 5 seconds */
#endif	/* TODO */
}

/*
 * Clear registers on exec
 */
void
setregs(p, pack, stack)
	struct proc *p;
	struct exec_package *pack;
	u_long stack;
{
	register struct pcb *pcb = &p->p_addr->u_pcb;
	register struct trapframe *tf;

	p->p_md.md_flags &= ~MDP_USEDFPU;
	pcb->pcb_flags = 0;

	tf = p->p_md.md_regs;

	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	tf->tf_r2 = 0;
	tf->tf_r3 = 0;
	tf->tf_r4 = *(int *)stack;	/* argc */
	tf->tf_r5 = stack+4;		/* argv */
	tf->tf_r6 = stack+4*tf->tf_r4 + 8; /* envp */
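	/*
	 * Assumed layout of the new user stack: argc at "stack", the argv[]
	 * vector (argc pointers plus a terminating NULL) at stack+4, so
	 * envp starts at stack + 4 + 4*(argc+1) = stack + 4*argc + 8.
	 */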
	tf->tf_r7 = 0;
	tf->tf_r8 = 0;
	tf->tf_r9 = 0;
	tf->tf_r10 = 0;
	tf->tf_r11 = 0;
	tf->tf_r12 = 0;
	tf->tf_r13 = 0;
	tf->tf_r14 = 0;
	tf->tf_spc = pack->ep_entry;
	tf->tf_ssr = PSL_USERSET;
	tf->tf_r15 = stack;
#ifdef TODO
	tf->tf_ebx = (int)PS_STRINGS;
#endif
}

/*
 * Initialize segments and descriptor tables
 */

extern  struct user *proc0paddr;

void
initSH3(first_avail)
	vaddr_t first_avail;
{
	unsigned short *p;
	unsigned short sum;
	int	size;
	extern void consinit __P((void));

	proc0.p_addr = proc0paddr; /* u-area of proc0 */

	/*
	 * Initialize the I/O port and I/O mem extent maps.
	 * Note: we don't have to check the return value since
	 * creation of a fixed extent map will never fail (since
	 * descriptor storage has already been allocated).
	 *
	 * N.B. The iomem extent manages _all_ physical addresses
	 * on the machine.  When the amount of RAM is found, the two
	 * extents of RAM are allocated from the map (0 -> ISA hole
	 * and end of ISA hole -> end of RAM).
	 */
	iomem_ex = extent_create("iomem", 0x0, 0xffffffff, M_DEVBUF,
	    (caddr_t)iomem_ex_storage, sizeof(iomem_ex_storage),
	    EX_NOCOALESCE|EX_NOWAIT);

#if 0	/* XXX (msaitoh) */
	consinit();	/* XXX SHOULD NOT BE DONE HERE */
#endif

	splraise(-1);
	enable_intr();

	avail_end = sh3_trunc_page(IOM_RAM_END + 1);

#if 0	/* XXX (msaitoh) */
	printf("initSH3\r\n");
#endif

	/*
	 * Calculate check sum
	 */
	size = (char *)&etext - (char *)&_start;
	p = (unsigned short *)&_start;
	sum = 0;
	size >>= 1;
	while (size--)
		sum += *p++;
#if 0
	printf("Check Sum = 0x%x", sum);
#endif
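	/*
	 * The 16-bit sum over the kernel text computed above appears to be
	 * purely diagnostic; its only consumer in this file is the printf
	 * compiled out under "#if 0".
	 */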

	/*
	 * Allocate the physical addresses used by RAM from the iomem
	 * extent map.  This is done before the addresses are
	 * page rounded just to make sure we get them all.
	 */
	if (extent_alloc_region(iomem_ex, IOM_RAM_BEGIN,
				IOM_RAM_SIZE,
				EX_NOWAIT)) {
		/* XXX What should we do? */
#if 1
		printf("WARNING: CAN'T ALLOCATE RAM MEMORY FROM IOMEM EXTENT MAP!\n");
#endif
	}

#if 0 /* avail_start is set in locore.s to the first page-rounded
	 available physical address */
	avail_start = IOM_RAM_BEGIN + NBPG;
#endif

	/* number of pages of physmem addr space */
	physmem = btoc(IOM_RAM_SIZE);
#ifdef	TODO
	dumpmem = physmem;
#endif

	/*
	 * Initialize for pmap_free_pages and pmap_next_page.
	 * These guys should be page-aligned.
	 */
	if (physmem < btoc(2 * 1024 * 1024)) {
		printf("warning: too little memory available; "
		       "have %d bytes, want %d bytes\n"
		       "running in degraded mode\n"
		       "press a key to confirm\n\n",
		       ctob(physmem), 2*1024*1024);
		cngetc();
	}

	/* Call pmap initialization to make new kernel address space */
	pmap_bootstrap((vaddr_t)atdevbase);

	/*
	 * Initialize error message buffer (at end of core).
	 */
	initmsgbuf((caddr_t)msgbuf_paddr, round_page(MSGBUFSIZE));

	/*
	 * set boot device information
	 */
	setup_bootinfo();

#if 0
	sh3_cache_on();
#endif

}

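/*
 * Minimal doubly-linked list node used by the generic _insque()/_remque()
 * helpers below; callers pass structures whose first two members are
 * compatible next/prev pointers.
 */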
struct queue {
	struct queue *q_next, *q_prev;
};

/*
 * insert an element into a queue
 */
void
_insque(v1, v2)
	void *v1;
	void *v2;
{
	struct queue *elem = v1, *head = v2;
	struct queue *next;

	next = head->q_next;
	elem->q_next = next;
	head->q_next = elem;
	elem->q_prev = head;
	next->q_prev = elem;
}

/*
 * remove an element from a queue
 */
void
_remque(v)
	void *v;
{
	struct queue *elem = v;
	struct queue *next, *prev;

	next = elem->q_next;
	prev = elem->q_prev;
	next->q_prev = prev;
	prev->q_next = next;
	elem->q_prev = 0;
}

#ifdef COMPAT_NOMID
static int
exec_nomid(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
	int error;
	u_long midmag, magic;
	u_short mid;
	struct exec *execp = epp->ep_hdr;

	/* check on validity of epp->ep_hdr performed by exec_aout_makecmds */

	midmag = ntohl(execp->a_midmag);
	mid = (midmag >> 16) & 0xffff;
	magic = midmag & 0xffff;

	if (magic == 0) {
		magic = (execp->a_midmag & 0xffff);
		mid = MID_ZERO;
	}

	midmag = mid << 16 | magic;

	switch (midmag) {
	case (MID_ZERO << 16) | ZMAGIC:
		/*
		 * 386BSD's ZMAGIC format:
		 */
		error = exec_aout_prep_oldzmagic(p, epp);
		break;

	case (MID_ZERO << 16) | QMAGIC:
		/*
		 * BSDI's QMAGIC format:
		 * same as new ZMAGIC format, but with different magic number
		 */
		error = exec_aout_prep_zmagic(p, epp);
		break;

	case (MID_ZERO << 16) | NMAGIC:
		/*
		 * BSDI's NMAGIC format:
		 * same as NMAGIC format, but with different magic number
		 * and with text starting at 0.
		 */
		error = exec_aout_prep_oldnmagic(p, epp);
		break;

	case (MID_ZERO << 16) | OMAGIC:
		/*
		 * BSDI's OMAGIC format:
		 * same as OMAGIC format, but with different magic number
		 * and with text starting at 0.
		 */
		error = exec_aout_prep_oldomagic(p, epp);
		break;

	default:
		error = ENOEXEC;
	}

	return error;
}
#endif

/*
 * cpu_exec_aout_makecmds():
 *	cpu-dependent a.out format hook for execve().
 *
 * Determine whether the given exec package refers to something which we
 * understand and, if so, set up the vmcmds for it.
 *
 * On the i386, old (386bsd) ZMAGIC binaries and BSDI QMAGIC binaries
 * are supported if COMPAT_NOMID is given as a kernel option.
 */
int
cpu_exec_aout_makecmds(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
	int error = ENOEXEC;

#ifdef COMPAT_NOMID
	if ((error = exec_nomid(p, epp)) == 0)
		return error;
#endif /* COMPAT_NOMID */

	return error;
}

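/*
 * Boot information lives in the "bootinfo" buffer as a leading int holding
 * the number of records, followed by a packed sequence of btinfo records,
 * each beginning with a struct btinfo_common header that carries its total
 * length and type.  setup_bootinfo() synthesizes a single BTINFO_BOOTDISK
 * record; lookup_bootinfo() walks the sequence looking for a given type.
 */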
void
setup_bootinfo(void)
{
	struct btinfo_bootdisk *help;

	*(int *)bootinfo = 1;
	help = (struct btinfo_bootdisk *)(bootinfo + sizeof(int));
	help->biosdev = 0;
	help->partition = 0;
	((struct btinfo_common *)help)->len = sizeof(struct btinfo_bootdisk);
	((struct btinfo_common *)help)->type = BTINFO_BOOTDISK;
}

void *
lookup_bootinfo(type)
	int type;
{
	struct btinfo_common *help;
	int n = *(int*)bootinfo;
	help = (struct btinfo_common *)(bootinfo + sizeof(int));
	while (n--) {
		if (help->type == type)
			return (help);
		help = (struct btinfo_common *)((char*)help + help->len);
	}
	return (0);
}


/*
 * consinit:
 * initialize the system console.
 * XXX - shouldn't deal with this initted thing, but then,
 * it shouldn't be called from initSH3 either.
 */
void
consinit()
{
	static int initted;

	if (initted)
		return;
	initted = 1;

	cninit();

#ifdef DDB
	ddb_init();
#endif
}

void
cpu_reset()
{

	disable_intr();

	Sh3Reset();
	for (;;)
		;
}


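/*
 * bus_space on this port appears to be a flat identity mapping: a bus
 * address is usable directly as a handle, so the map/subregion/alloc
 * routines below simply hand the address back and unmap/free have
 * nothing to undo.
 */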
1190bus_space_map (t, addr, size, flags, bshp)
1191	bus_space_tag_t t;
1192	bus_addr_t addr;
1193	bus_size_t size;
1194	int flags;
1195	bus_space_handle_t *bshp;
1196{
1197
1198	*bshp = (bus_space_handle_t)addr;
1199
1200	return 0;
1201}
1202
1203int
1204sh_memio_subregion(t, bsh, offset, size, nbshp)
1205	bus_space_tag_t t;
1206	bus_space_handle_t bsh;
1207	bus_size_t offset, size;
1208	bus_space_handle_t *nbshp;
1209{
1210
1211	*nbshp = bsh + offset;
1212	return (0);
1213}
1214
1215int
1216sh_memio_alloc(t, rstart, rend, size, alignment, boundary, flags,
1217	       bpap, bshp)
1218	bus_space_tag_t t;
1219	bus_addr_t rstart, rend;
1220	bus_size_t size, alignment, boundary;
1221	int flags;
1222	bus_addr_t *bpap;
1223	bus_space_handle_t *bshp;
1224{
1225	*bshp = *bpap = rstart;
1226
1227	return (0);
1228}
1229
1230void
1231sh_memio_free(t, bsh, size)
1232	bus_space_tag_t t;
1233	bus_space_handle_t bsh;
1234	bus_size_t size;
1235{
1236
1237}
1238
1239void
1240sh_memio_unmap(t, bsh, size)
1241	bus_space_tag_t t;
1242	bus_space_handle_t bsh;
1243	bus_size_t size;
1244{
1245	return;
1246}
1247
1248/*
1249 * InitializeBsc
1250 * : BSC(Bus State Controler)
1251 */
1252void InitializeBsc __P((void));
1253
1254void
1255InitializeBsc()
1256{
1257
1258	/*
1259	 * Drive RAS,CAS in stand by mode and bus release mode
1260	 * Area0 = Normal memory, Area5,6=Normal(no burst)
1261	 * Area2 = Normal memory, Area3 = SDRAM, Area5 = Normal memory
1262	 * Area4 = Normal Memory
1263	 * Area6 = Normal memory
1264	 */
1265	SHREG_BSC.BCR1.WORD = BSC_BCR1_VAL;
1266
1267	/*
1268	 * Bus Width
1269	 * Area4: Bus width = 16bit
1270	 * Area6,5 = 16bit
1271	 * Area1 = 8bit
1272	 * Area2,3: Bus width = 32bit
1273	 */
1274	 SHREG_BSC.BCR2.WORD = BSC_BCR2_VAL;
1275
1276	/*
1277	 * Idle cycle number in transition area and read to write
1278	 * Area6 = 3, Area5 = 3, Area4 = 3, Area3 = 3, Area2 = 3
1279	 * Area1 = 3, Area0 = 3
1280	 */
1281	SHREG_BSC.WCR1.WORD = BSC_WCR1_VAL;
1282
1283	/*
1284	 * Wait cycle
1285	 * Area 6 = 6
1286	 * Area 5 = 2
1287	 * Area 4 = 10
1288	 * Area 3 = 3
1289	 * Area 2,1 = 3
1290	 * Area 0 = 6
1291	 */
1292	SHREG_BSC.WCR2.WORD = BSC_WCR2_VAL;
1293
1294#ifdef SH4
1295	SHREG_BSC.WCR3.WORD = BSC_WCR3_VAL;
1296#endif
1297
1298	/*
1299	 * RAS pre-charge = 2cycle, RAS-CAS delay = 3 cycle,
1300	 * write pre-charge=1cycle
1301	 * CAS before RAS refresh RAS assert time = 3 cycle
1302	 * Disable burst, Bus size=32bit, Column Address=10bit, Refresh ON
1303	 * CAS before RAS refresh ON, EDO DRAM
1304	 */
1305	SHREG_BSC.MCR.WORD = BSC_MCR_VAL;
1306
1307#ifdef BSC_SDMR_VAL
1308#if 1
1309#define SDMR	(*(volatile unsigned char  *)BSC_SDMR_VAL)
1310
1311	SDMR = 0;
1312#else
1313#define ADDSET	(*(volatile unsigned short *)0x1A000000)
1314#define ADDRST	(*(volatile unsigned short *)0x18000000)
1315#define SDMR	(*(volatile unsigned char  *)BSC_SDMR_VAL)
1316
1317	ADDSET = 0;
1318	SDMR = 0;
1319	ADDRST = 0;
1320#endif
1321#endif
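
	/*
	 * For SDRAM parts, the mode register is programmed through the BSC
	 * by writing to a magic address (BSC_SDMR_VAL): the data written is
	 * ignored and the mode bits are taken from the address itself.
	 * This is an assumption based on the SH-3 bus state controller
	 * documentation, not something verified here.
	 */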

	/*
	 * PCMCIA Control Register
	 * OE/WE assert delay 3.5 cycle
	 * OE/WE negate-address delay 3.5 cycle
	 */
#ifdef BSC_PCR_VAL
	SHREG_BSC.PCR.WORD = 0x00ff;
#endif

	/*
	 * Refresh Timer Control/Status Register
	 * Disable interrupt by CMF, clock 1/16, Disable OVF interrupt
	 * Count Limit = 1024
	 * In the following statement, the high byte is 0xa5 (0xa4 for RFCR)
	 * because the SH3 requires that value when writing these registers.
	 */
	SHREG_BSC.RTCSR.WORD = BSC_RTCSR_VAL;


	/*
	 * Refresh Timer Counter
	 * Initialize to 0
	 */
	SHREG_BSC.RTCNT = BSC_RTCNT_VAL;

	/* set Refresh Time Constant Register */
	SHREG_BSC.RTCOR = BSC_RTCOR_VAL;

	/* init Refresh Count Register */
#ifdef BSC_RFCR_VAL
	SHREG_BSC.RFCR = BSC_RFCR_VAL;
#endif

	/* Set Clock mode (make internal clock double speed) */

	SHREG_FRQCR = FRQCR_VAL;

#ifndef MMEYE_NO_CACHE
	/* Cache ON */
	SHREG_CCR = 0x0001;
#endif
}

void
sh3_cache_on(void)
{
#ifndef MMEYE_NO_CACHE
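	/*
	 * CCR bit 0x0001 is assumed to be the cache-enable bit and 0x0008
	 * the cache-flush request, so the sequence below flushes stale
	 * lines and leaves the cache enabled (SH-3 CCR layout assumption).
	 */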
	/* Cache ON */
	SHREG_CCR = 0x0001;
	SHREG_CCR = 0x0009; /* cache clear */
	SHREG_CCR = 0x0001; /* cache on */
#endif
}

#include <machine/mmeye.h>
void
LoadAndReset(char *osimage)
{
	void *buf_addr;
	u_long size;
	u_long *src;
	u_long *dest;
	u_long csum = 0;
	u_long csum2 = 0;
	u_long size2;
#define OSIMAGE_BUF_ADDR 0x8c400000 /* !!!!!! This value depends on the
				       physically available memory */


	printf("LoadAndReset:copy start\n");
	buf_addr = (void *)OSIMAGE_BUF_ADDR;

	size = *(u_long *)osimage;
	src = (u_long *)osimage;
	dest = buf_addr;

	size = (size + sizeof(u_long)*2 + 3) >> 2 ;
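	/*
	 * The arithmetic above converts the byte count into the number of
	 * 32-bit words to copy, rounding up and including what appears to
	 * be a two-word header in front of the image (whose first word is
	 * the size read above); this is inferred from the code, not a
	 * documented image format.
	 */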
	size2 = size;

	while (size--){
		csum += *src;
		*dest++ = *src++;
	}

	dest = buf_addr;
	while (size2--)
		csum2 += *dest++;

	printf("LoadAndReset:copy end[%lx,%lx]\n", csum, csum2);
	printf("start XLoadAndReset\n");

	/* mask all external interrupts (XXX) */

	XLoadAndReset(buf_addr);
}
