1/*	$NetBSD: machdep.c,v 1.4 1999/09/16 22:52:11 msaitoh Exp $	*/
2
3/*-
4 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 *    must display the following acknowledgement:
21 *	This product includes software developed by the NetBSD
22 *	Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 *    contributors may be used to endorse or promote products derived
25 *    from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40/*-
41 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
42 * All rights reserved.
43 *
44 * This code is derived from software contributed to Berkeley by
45 * William Jolitz.
46 *
47 * Redistribution and use in source and binary forms, with or without
48 * modification, are permitted provided that the following conditions
49 * are met:
50 * 1. Redistributions of source code must retain the above copyright
51 *    notice, this list of conditions and the following disclaimer.
52 * 2. Redistributions in binary form must reproduce the above copyright
53 *    notice, this list of conditions and the following disclaimer in the
54 *    documentation and/or other materials provided with the distribution.
55 * 3. All advertising materials mentioning features or use of this software
56 *    must display the following acknowledgement:
57 *	This product includes software developed by the University of
58 *	California, Berkeley and its contributors.
59 * 4. Neither the name of the University nor the names of its contributors
60 *    may be used to endorse or promote products derived from this software
61 *    without specific prior written permission.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * SUCH DAMAGE.
74 *
75 *	@(#)machdep.c	7.4 (Berkeley) 6/3/91
76 */
77
78#include "opt_compat_netbsd.h"
79#include "opt_ddb.h"
80#include "opt_memsize.h"
81#include "opt_initbsc.h"
82#include "opt_sysv.h"
83
84#include <sys/param.h>
85#include <sys/systm.h>
86#include <sys/signalvar.h>
87#include <sys/kernel.h>
88#include <sys/map.h>
89#include <sys/proc.h>
90#include <sys/user.h>
91#include <sys/exec.h>
92#include <sys/buf.h>
93#include <sys/reboot.h>
94#include <sys/conf.h>
95#include <sys/file.h>
96#include <sys/callout.h>
97#include <sys/malloc.h>
98#include <sys/mbuf.h>
99#include <sys/msgbuf.h>
100#include <sys/mount.h>
101#include <sys/vnode.h>
102#include <sys/device.h>
103#include <sys/extent.h>
104#include <sys/syscallargs.h>
105
106#ifdef KGDB
107#include <sys/kgdb.h>
108#endif
109
110#include <dev/cons.h>
111
112#include <vm/vm.h>
113#include <vm/vm_kern.h>
114#include <vm/vm_page.h>
115
116#include <uvm/uvm_extern.h>
117
118#include <sys/sysctl.h>
119
120#include <machine/cpu.h>
121#include <machine/cpufunc.h>
122#include <machine/psl.h>
123#include <machine/bootinfo.h>
124#include <machine/bus.h>
125#include <sh3/bscreg.h>
126#include <sh3/ccrreg.h>
127#include <sh3/cpgreg.h>
128#include <sh3/intcreg.h>
129#include <sh3/pfcreg.h>
130#include <sh3/wdtreg.h>
131
132#include <sys/termios.h>
133#include "sci.h"
134
135/* the following is used externally (sysctl_hw) */
136char machine[] = MACHINE;		/* cpu "architecture" */
137char machine_arch[] = MACHINE_ARCH;	/* machine_arch = "sh3" */
138
139#ifdef sh3_debug
140int cpu_debug_mode = 1;
141#else
142int cpu_debug_mode = 0;
143#endif
144
145char cpu_model[120];
146
147char bootinfo[BOOTINFO_MAXSIZE];
148
149int physmem;
150int dumpmem_low;
151int dumpmem_high;
152vaddr_t atdevbase;	/* location of start of iomem in virtual */
153paddr_t msgbuf_paddr;
154struct user *proc0paddr;
155
156vm_map_t exec_map = NULL;
157vm_map_t mb_map = NULL;
158vm_map_t phys_map = NULL;
159
160extern int boothowto;
161extern paddr_t avail_start, avail_end;
162
163#ifdef	SYSCALL_DEBUG
164#define	SCDEBUG_ALL 0x0004
165extern int	scdebug;
166#endif
167
168#define IOM_RAM_END	((paddr_t)IOM_RAM_BEGIN + IOM_RAM_SIZE - 1)
169
170/*
171 * Extent maps to manage I/O and ISA memory hole space.  Allocate
172 * storage for 8 regions in each, initially.  Later, ioport_malloc_safe
173 * will indicate that it's safe to use malloc() to dynamically allocate
174 * region descriptors.
175 *
176 * N.B. At least two regions are _always_ allocated from the iomem
177 * extent map; (0 -> ISA hole) and (end of ISA hole -> end of RAM).
178 *
179 * The extent maps are not static!  Machine-dependent ISA and EISA
180 * routines need access to them for bus address space allocation.
181 */
182static	long iomem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
183struct	extent *ioport_ex;
184struct	extent *iomem_ex;
185static	int ioport_malloc_safe;
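
/*
 * Illustrative sketch only (nothing in this file calls it this way): a
 * machine-dependent bus front-end would typically reserve device space with
 *
 *	error = extent_alloc_region(iomem_ex, start, size,
 *	    EX_NOWAIT | (ioport_malloc_safe ? EX_MALLOCOK : 0));
 *
 * and give it back with extent_free() when the mapping is torn down.
 */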
186
187void setup_bootinfo __P((void));
188void dumpsys __P((void));
189void identifycpu __P((void));
190void initSH3 __P((void *));
191void InitializeSci  __P((unsigned char));
192void sh3_cache_on __P((void));
193void LoadAndReset __P((char *));
194void XLoadAndReset __P((char *));
195void Sh3Reset __P((void));
196
197#include <dev/ic/comreg.h>
198#include <dev/ic/comvar.h>
199
200void	consinit __P((void));
201
202#ifdef COMPAT_NOMID
203static int exec_nomid	__P((struct proc *, struct exec_package *));
204#endif
205
206/*
207 * Machine-dependent startup code
208 *
209 * This is called from main() in kern/main.c.
210 */
211void
212cpu_startup()
213{
214	unsigned i;
215	caddr_t v;
216	int sz;
217	int base, residual;
218	vaddr_t minaddr, maxaddr;
219	vsize_t size;
220	struct pcb *pcb;
221	char pbuf[9];
222
223	printf(version);
224
225	sprintf(cpu_model, "Hitachi SH3");
226
227	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
228	printf("total memory = %s\n", pbuf);
229
230	/*
231	 * Find out how much space we need, allocate it,
232	 * and then give everything true virtual addresses.
233	 */
234	sz = (int)allocsys(NULL, NULL);
235	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
236		panic("startup: no room for tables");
237	if (allocsys(v, NULL) - v != sz)
238		panic("startup: table size inconsistency");
239
240	/*
241	 * Now allocate buffers proper.  They are different than the above
242	 * in that they usually occupy more virtual memory than physical.
243	 */
244	size = MAXBSIZE * nbuf;
245	buffers = 0;
246	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
247		    NULL, UVM_UNKNOWN_OFFSET,
248		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
249				UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
250		panic("cpu_startup: cannot allocate VM for buffers");
251	minaddr = (vaddr_t)buffers;
252	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
253		/* don't want to alloc more physical mem than needed */
254		bufpages = btoc(MAXBSIZE) * nbuf;
255	}
256
257	base = bufpages / nbuf;
258	residual = bufpages % nbuf;
259	for (i = 0; i < nbuf; i++) {
260		vsize_t curbufsize;
261		vaddr_t curbuf;
262		struct vm_page *pg;
263
264		/*
265		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
266		 * that MAXBSIZE space, we allocate and map (base+1) pages
267		 * for the first "residual" buffers, and then we allocate
268		 * "base" pages for the rest.
269		 */
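		/*
		 * For example (illustrative numbers only): bufpages = 103 and
		 * nbuf = 10 give base = 10 and residual = 3, so buffers 0-2
		 * get 11 pages each and buffers 3-9 get 10 pages each
		 * (3*11 + 7*10 = 103).
		 */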
270		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
271		curbufsize = CLBYTES * ((i < residual) ? (base+1) : base);
272
273		while (curbufsize) {
274			pg = uvm_pagealloc(NULL, 0, NULL, 0);
275			if (pg == NULL)
276				panic("cpu_startup: not enough memory for "
277				    "buffer cache");
278			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
279					VM_PROT_READ|VM_PROT_WRITE);
280			curbuf += PAGE_SIZE;
281			curbufsize -= PAGE_SIZE;
282		}
283	}
284
285	/*
286	 * Allocate a submap for exec arguments.  This map effectively
287	 * limits the number of processes exec'ing at any time.
288	 */
289	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
290				   16*NCARGS, TRUE, FALSE, NULL);
291
292	/*
293	 * Allocate a submap for physio
294	 */
295	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
296				   VM_PHYS_SIZE, TRUE, FALSE, NULL);
297
298	/*
299	 * Finally, allocate mbuf cluster submap.
300	 */
301	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
302	    VM_MBUF_SIZE, FALSE, FALSE, NULL);
303
304	/*
305	 * Initialize callouts
306	 */
307	callfree = callout;
308	for (i = 1; i < ncallout; i++)
309		callout[i-1].c_next = &callout[i];
310
311	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
312	printf("avail memory = %s\n", pbuf);
313	format_bytes(pbuf, sizeof(pbuf), bufpages * CLBYTES);
314	printf("using %d buffers containing %s of memory\n", nbuf, pbuf);
315
316	/*
317	 * Set up buffers, so they can be used to read disk labels.
318	 */
319	bufinit();
320
321	/* Safe for i/o port allocation to use malloc now. */
322	ioport_malloc_safe = 1;
323
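	/*
	 * Point proc0's saved stack pointer at the top of its u-area
	 * (leaving a small scratch gap) and place its trapframe just
	 * below that.
	 */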
324	curpcb = pcb = &proc0.p_addr->u_pcb;
325	pcb->r15 = (int)proc0.p_addr + USPACE - 16;
326
327	proc0.p_md.md_regs = (struct trapframe *)pcb->r15 - 1;
328
329#ifdef SYSCALL_DEBUG
330	scdebug |= SCDEBUG_ALL;
331#endif
332
333#ifdef FORCE_RB_SINGLE
334	boothowto |= RB_SINGLE;
335#endif
336}
337
338/*
339 * Info for CTL_HW
340 */
341extern	char version[];
342
343#define CPUDEBUG
344
345/*
346 * machine dependent system variables.
347 */
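/*
 * A minimal userland sketch of reading one of these nodes (illustrative
 * only, using the standard sysctl(3) interface):
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_CONSDEV };
 *	dev_t cdev;
 *	size_t len = sizeof(cdev);
 *
 *	if (sysctl(mib, 2, &cdev, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */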
348int
349cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
350	int *name;
351	u_int namelen;
352	void *oldp;
353	size_t *oldlenp;
354	void *newp;
355	size_t newlen;
356	struct proc *p;
357{
358	dev_t consdev;
359	struct btinfo_bootpath *bibp;
360	struct trapframe *tf;
361	char *osimage;
362
363	/* all sysctl names at this level are terminal */
364	if (namelen != 1)
365		return (ENOTDIR);		/* overloaded */
366
367	switch (name[0]) {
368	case CPU_CONSDEV:
369		if (cn_tab != NULL)
370			consdev = cn_tab->cn_dev;
371		else
372			consdev = NODEV;
373		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
374		    sizeof consdev));
375
376	case CPU_NKPDE:
377		return (sysctl_rdint(oldp, oldlenp, newp, nkpde));
378
379	case CPU_BOOTED_KERNEL:
380	        bibp = lookup_bootinfo(BTINFO_BOOTPATH);
381	        if (!bibp)
382			return (ENOENT); /* ??? */
383		return (sysctl_rdstring(oldp, oldlenp, newp, bibp->bootpath));
384
385	case CPU_SETPRIVPROC:
386		if (newp == NULL)
387			return (0);
388
		/* set current process to privileged process */
390		tf = p->p_md.md_regs;
391		tf->tf_ssr |= PSL_MD;
392		return (0);
393
394	case CPU_DEBUGMODE:
395		return (sysctl_int(oldp, oldlenp, newp, newlen,
396				   &cpu_debug_mode));
397
398	case CPU_LOADANDRESET:
399		if (newp != NULL) {
400			osimage = (char *)(*(u_long *)newp);
401
402			LoadAndReset(osimage);
			/* LoadAndReset() does not return */
404		}
405		return (0);
406
407	default:
408		return (EOPNOTSUPP);
409	}
410	/* NOTREACHED */
411}
412
413/*
414 * Send an interrupt to process.
415 *
416 * Stack is set up to allow sigcode stored
417 * in u. to call routine, followed by kcall
418 * to sigreturn routine below.  After sigreturn
419 * resets the signal mask, the stack, and the
420 * frame pointer, it returns to the user
421 * specified pc, psl.
422 */
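/*
 * The sigframe written to the user stack below carries the signal number,
 * the code, a pointer to the saved sigcontext, the handler address and the
 * sigcontext itself; fp ends up pointing at that whole structure.
 */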
423void
424sendsig(catcher, sig, mask, code)
425	sig_t catcher;
426	int sig;
427	sigset_t *mask;
428	u_long code;
429{
430	struct proc *p = curproc;
431	struct trapframe *tf;
432	struct sigframe *fp, frame;
433	struct sigacts *psp = p->p_sigacts;
434	int onstack;
435
436	tf = p->p_md.md_regs;
437
438	/* Do we need to jump onto the signal stack? */
439	onstack =
440	    (psp->ps_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
441	    (psp->ps_sigact[sig].sa_flags & SA_ONSTACK) != 0;
442
443	/* Allocate space for the signal handler context. */
444	if (onstack)
445		fp = (struct sigframe *)((caddr_t)psp->ps_sigstk.ss_sp +
446						  psp->ps_sigstk.ss_size);
447	else
448		fp = (struct sigframe *)tf->tf_r15;
449	fp--;
450
451	/* Build stack frame for signal trampoline. */
452	frame.sf_signum = sig;
453	frame.sf_code = code;
454	frame.sf_scp = &fp->sf_sc;
455	frame.sf_handler = catcher;
456
457	/* Save register context. */
458	frame.sf_sc.sc_ssr = tf->tf_ssr;
459	frame.sf_sc.sc_spc = tf->tf_spc;
460	frame.sf_sc.sc_pr = tf->tf_pr;
461	frame.sf_sc.sc_r15 = tf->tf_r15;
462	frame.sf_sc.sc_r14 = tf->tf_r14;
463	frame.sf_sc.sc_r13 = tf->tf_r13;
464	frame.sf_sc.sc_r12 = tf->tf_r12;
465	frame.sf_sc.sc_r11 = tf->tf_r11;
466	frame.sf_sc.sc_r10 = tf->tf_r10;
467	frame.sf_sc.sc_r9 = tf->tf_r9;
468	frame.sf_sc.sc_r8 = tf->tf_r8;
469	frame.sf_sc.sc_r7 = tf->tf_r7;
470	frame.sf_sc.sc_r6 = tf->tf_r6;
471	frame.sf_sc.sc_r5 = tf->tf_r5;
472	frame.sf_sc.sc_r4 = tf->tf_r4;
473	frame.sf_sc.sc_r3 = tf->tf_r3;
474	frame.sf_sc.sc_r2 = tf->tf_r2;
475	frame.sf_sc.sc_r1 = tf->tf_r1;
476	frame.sf_sc.sc_r0 = tf->tf_r0;
477	frame.sf_sc.sc_trapno = tf->tf_trapno;
478#ifdef TODO
479	frame.sf_sc.sc_err = tf->tf_err;
480#endif
481
482	/* Save signal stack. */
483	frame.sf_sc.sc_onstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;
484
485	/* Save signal mask. */
486	frame.sf_sc.sc_mask = *mask;
487
488#ifdef COMPAT_13
489	/*
490	 * XXX We always have to save an old style signal mask because
491	 * XXX we might be delivering a signal to a process which will
492	 * XXX escape from the signal in a non-standard way and invoke
493	 * XXX sigreturn() directly.
494	 */
495	native_sigset_to_sigset13(mask, &frame.sf_sc.__sc_mask13);
496#endif
497
498	if (copyout(&frame, fp, sizeof(frame)) != 0) {
499		/*
500		 * Process has trashed its stack; give it an illegal
501		 * instruction to halt it in its tracks.
502		 */
503		sigexit(p, SIGILL);
504		/* NOTREACHED */
505	}
506
507	/*
508	 * Build context to run handler in.
509	 */
510	tf->tf_spc = (int)psp->ps_sigcode;
511#ifdef TODO
512	tf->tf_ssr &= ~(PSL_T|PSL_VM|PSL_AC);
513#endif
514	tf->tf_r15 = (int)fp;
515
516	/* Remember that we're now on the signal stack. */
517	if (onstack)
518		psp->ps_sigstk.ss_flags |= SS_ONSTACK;
519}
520
521/*
522 * System call to cleanup state after a signal
523 * has been taken.  Reset signal mask and
524 * stack state from context left by sendsig (above).
525 * Return to previous pc and psl as specified by
526 * context left by sendsig. Check carefully to
527 * make sure that the user has not modified the
528 * psl to gain improper privileges or to cause
529 * a machine fault.
530 */
531int
532sys___sigreturn14(p, v, retval)
533	struct proc *p;
534	void *v;
535	register_t *retval;
536{
537	struct sys___sigreturn14_args /* {
538		syscallarg(struct sigcontext *) sigcntxp;
539	} */ *uap = v;
540	struct sigcontext *scp, context;
541	struct trapframe *tf;
542
543	/*
544	 * The trampoline code hands us the context.
545	 * It is unsafe to keep track of it ourselves, in the event that a
546	 * program jumps out of a signal handler.
547	 */
548	scp = SCARG(uap, sigcntxp);
549	if (copyin((caddr_t)scp, &context, sizeof(*scp)) != 0)
550		return (EFAULT);
551
552	/* Restore signal context. */
553	tf = p->p_md.md_regs;
554	{
555		/*
556		 * Check for security violations.  If we're returning to
557		 * protected mode, the CPU will validate the segment registers
558		 * automatically and generate a trap on violations.  We handle
559		 * the trap, rather than doing all of the checking here.
560		 */
561#ifdef TODO
562	  if (((context.sc_ssr ^ tf->tf_ssr) & PSL_USERSTATIC) != 0) {
563	    return (EINVAL);
564	  }
565#endif
566
567	  tf->tf_ssr = context.sc_ssr;
568	}
569	tf->tf_r0 = context.sc_r0;
570	tf->tf_r1 = context.sc_r1;
571	tf->tf_r2 = context.sc_r2;
572	tf->tf_r3 = context.sc_r3;
573	tf->tf_r4 = context.sc_r4;
574	tf->tf_r5 = context.sc_r5;
575	tf->tf_r6 = context.sc_r6;
576	tf->tf_r7 = context.sc_r7;
577	tf->tf_r8 = context.sc_r8;
578	tf->tf_r9 = context.sc_r9;
579	tf->tf_r10 = context.sc_r10;
580	tf->tf_r11 = context.sc_r11;
581	tf->tf_r12 = context.sc_r12;
582	tf->tf_r13 = context.sc_r13;
583	tf->tf_r14 = context.sc_r14;
584	tf->tf_spc = context.sc_spc;
585	tf->tf_r15 = context.sc_r15;
586	tf->tf_pr = context.sc_pr;
587
588	/* Restore signal stack. */
589	if (context.sc_onstack & SS_ONSTACK)
590		p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK;
591	else
592		p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK;
593	/* Restore signal mask. */
594	(void) sigprocmask1(p, SIG_SETMASK, &context.sc_mask, 0);
595
596	return (EJUSTRETURN);
597}
598
599int waittime = -1;
600int cold = 1;
601struct pcb dumppcb;
602
603void
604cpu_reboot(howto, bootstr)
605	int howto;
606	char *bootstr;
607{
608
609	if (cold) {
610		howto |= RB_HALT;
611		goto haltsys;
612	}
613
614	boothowto = howto;
615	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
616		waittime = 0;
617		vfs_shutdown();
618		/*
619		 * If we've been adjusting the clock, the todr
620		 * will be out of synch; adjust it now.
621		 */
622		/* resettodr(); */
623	}
624
625	/* Disable interrupts. */
626	splhigh();
627
628	/* Do a dump if requested. */
629	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
630		dumpsys();
631
632haltsys:
633	doshutdownhooks();
634
635	if (howto & RB_HALT) {
636		printf("\n");
637		printf("The operating system has halted.\n");
638		printf("Please press any key to reboot.\n\n");
639		cngetc();
640	}
641
642	printf("rebooting...\n");
643	cpu_reset();
644	for(;;)
645		;
646	/*NOTREACHED*/
647}
648
649/*
650 * These variables are needed by /sbin/savecore
651 */
652u_long	dumpmag = 0x8fca0101;	/* magic number */
653int 	dumpsize = 0;		/* pages */
654long	dumplo = 0; 		/* blocks */
655
656/*
657 * This is called by main to set dumplo and dumpsize.
658 * Dumps always skip the first CLBYTES of disk space
659 * in case there might be a disk label stored there.
660 * If there is extra space, put dump at the end to
661 * reduce the chance that swapping trashes it.
662 */
663void
664cpu_dumpconf()
665{
666#ifdef	TODO
667	int nblks;	/* size of dump area */
668	int maj;
669
670	if (dumpdev == NODEV)
671		return;
672	maj = major(dumpdev);
673	if (maj < 0 || maj >= nblkdev)
674		panic("dumpconf: bad dumpdev=0x%x", dumpdev);
675	if (bdevsw[maj].d_psize == NULL)
676		return;
677	nblks = (*bdevsw[maj].d_psize)(dumpdev);
678	if (nblks <= ctod(1))
679		return;
680
681	dumpsize = btoc(IOM_END + ctob(dumpmem_high));
682
683	/* Always skip the first CLBYTES, in case there is a label there. */
684	if (dumplo < ctod(1))
685		dumplo = ctod(1);
686
687	/* Put dump at end of partition, and make it fit. */
688	if (dumpsize > dtoc(nblks - dumplo))
689		dumpsize = dtoc(nblks - dumplo);
690	if (dumplo < nblks - ctod(dumpsize))
691		dumplo = nblks - ctod(dumpsize);
692#endif
693}
694
695/*
696 * Doadump comes here after turning off memory management and
697 * getting on the dump stack, either when called above, or by
698 * the auto-restart code.
699 */
700#define BYTES_PER_DUMP  NBPG	/* must be a multiple of pagesize XXX small */
701static vaddr_t dumpspace;
702
703vaddr_t
704reserve_dumppages(p)
705	vaddr_t p;
706{
707
708	dumpspace = p;
709	return (p + BYTES_PER_DUMP);
710}
711
712void
713dumpsys()
714{
715#ifdef	TODO
716	unsigned bytes, i, n;
717	int maddr, psize;
718	daddr_t blkno;
719	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
720	int error;
721
722	/* Save registers. */
723	savectx(&dumppcb);
724
725	msgbufmapped = 0;	/* don't record dump msgs in msgbuf */
726	if (dumpdev == NODEV)
727		return;
728
729	/*
730	 * For dumps during autoconfiguration,
731	 * if dump device has already configured...
732	 */
733	if (dumpsize == 0)
734		cpu_dumpconf();
735	if (dumplo < 0)
736		return;
737	printf("\ndumping to dev %x, offset %ld\n", dumpdev, dumplo);
738
739	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
740	printf("dump ");
741	if (psize == -1) {
742		printf("area unavailable\n");
743		return;
744	}
745
746#if 0	/* XXX this doesn't work.  grr. */
747        /* toss any characters present prior to dump */
748	while (sget() != NULL); /*syscons and pccons differ */
749#endif
750
751	bytes = ctob(dumpmem_high) + IOM_END;
752	maddr = 0;
753	blkno = dumplo;
754	dump = bdevsw[major(dumpdev)].d_dump;
755	error = 0;
756	for (i = 0; i < bytes; i += n) {
757		/*
758		 * Avoid dumping the ISA memory hole, and areas that
759		 * BIOS claims aren't in low memory.
760		 */
761		if (i >= ctob(dumpmem_low) && i < IOM_END) {
762			n = IOM_END - i;
763			maddr += n;
764			blkno += btodb(n);
765			continue;
766		}
767
		/* Print out how many MB we have to go. */
769		n = bytes - i;
770		if (n && (n % (1024*1024)) == 0)
771			printf("%d ", n / (1024 * 1024));
772
773		/* Limit size for next transfer. */
774		if (n > BYTES_PER_DUMP)
775			n =  BYTES_PER_DUMP;
776
777		(void) pmap_map(dumpspace, maddr, maddr + n, VM_PROT_READ);
778		error = (*dump)(dumpdev, blkno, (caddr_t)dumpspace, n);
779		if (error)
780			break;
781		maddr += n;
782		blkno += btodb(n);			/* XXX? */
783
784#if 0	/* XXX this doesn't work.  grr. */
785		/* operator aborting dump? */
786		if (sget() != NULL) {
787			error = EINTR;
788			break;
789		}
790#endif
791	}
792
793	switch (error) {
794
795	case ENXIO:
796		printf("device bad\n");
797		break;
798
799	case EFAULT:
800		printf("device not ready\n");
801		break;
802
803	case EINVAL:
804		printf("area improper\n");
805		break;
806
807	case EIO:
808		printf("i/o error\n");
809		break;
810
811	case EINTR:
812		printf("aborted from console\n");
813		break;
814
815	case 0:
816		printf("succeeded\n");
817		break;
818
819	default:
820		printf("error %d\n", error);
821		break;
822	}
823	printf("\n\n");
824	delay(5000000);		/* 5 seconds */
825#endif	/* TODO */
826}
827
828/*
829 * Clear registers on exec
830 */
831void
832setregs(p, pack, stack)
833	struct proc *p;
834	struct exec_package *pack;
835	u_long stack;
836{
837	register struct pcb *pcb = &p->p_addr->u_pcb;
838	register struct trapframe *tf;
839
840	p->p_md.md_flags &= ~MDP_USEDFPU;
841	pcb->pcb_flags = 0;
842
843	tf = p->p_md.md_regs;
844
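	/*
	 * The user stack at exec time (as laid out by the exec code) is
	 *	argc, argv[0..argc-1], NULL, envp[0..], NULL, ...
	 * with 4-byte entries, which is why argv is found at stack + 4 and
	 * envp at stack + 4*argc + 8 below.
	 */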
845	tf->tf_r0 = 0;
846	tf->tf_r1 = 0;
847	tf->tf_r2 = 0;
848	tf->tf_r3 = 0;
849	tf->tf_r4 = *(int *)stack;	/* argc */
850	tf->tf_r5 = stack+4;		/* argv */
851	tf->tf_r6 = stack+4*tf->tf_r4 + 8; /* envp */
852	tf->tf_r7 = 0;
853	tf->tf_r8 = 0;
854	tf->tf_r9 = 0;
855	tf->tf_r10 = 0;
856	tf->tf_r11 = 0;
857	tf->tf_r12 = 0;
858	tf->tf_r13 = 0;
859	tf->tf_r14 = 0;
860	tf->tf_spc = pack->ep_entry;
861	tf->tf_ssr = PSL_USERSET;
862	tf->tf_r15 = stack;
863#ifdef TODO
864	tf->tf_r9 = (int)PS_STRINGS;
865#endif
866}
867
868/*
869 * Initialize segments and descriptor tables
870 */
871#define VBRINIT		((char *)0x8c000000)
872#define Trap100Vec	(VBRINIT + 0x100)
873#define Trap600Vec	(VBRINIT + 0x600)
874#define TLBVECTOR	(VBRINIT + 0x400)
875#define VADDRSTART	VM_MIN_KERNEL_ADDRESS
876
877extern int nkpde;
878extern char MonTrap100[], MonTrap100_end[];
879extern char MonTrap600[], MonTrap600_end[];
880extern char _start[], etext[], edata[], end[];
881extern char tlbmisshandler_stub[], tlbmisshandler_stub_end[];
882
883void
884initSH3(pc)
885	void *pc;	/* XXX return address */
886{
887	paddr_t avail;
888	pd_entry_t *pagedir;
889	pt_entry_t *pagetab, pte;
890	u_int sp;
891	int x;
892	char *p;
893
894	avail = sh3_round_page(end);
895
896	/* XXX nkpde = kernel page dir area (IOM_RAM_SIZE*2 Mbyte (why?)) */
897	nkpde = IOM_RAM_SIZE >> (PDSHIFT - 1);
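	/*
	 * Assuming each PDE maps 1 << PDSHIFT bytes, IOM_RAM_SIZE >> PDSHIFT
	 * entries would cover RAM exactly; shifting by PDSHIFT - 1 reserves
	 * twice that, which matches the "why?" note above.
	 */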
898
899	/*
900	 * clear .bss, .common area, page dir area,
901	 *	process0 stack, page table area
902	 */
903	p = (char *)avail + (1 + UPAGES) * NBPG + NBPG * (1 + nkpde); /* XXX */
904	bzero(edata, p - edata);
905
906	/*
907	 * install trap handler
908	 */
909	bcopy(MonTrap100, Trap100Vec, MonTrap100_end - MonTrap100);
910	bcopy(MonTrap600, Trap600Vec, MonTrap600_end - MonTrap600);
911	__asm ("ldc %0, vbr" :: "r"(VBRINIT));
912
913/*
914 *                          edata  end
915 *	+-------------+------+-----+----------+-------------+------------+
916 *	| kernel text | data | bss | Page Dir | Proc0 Stack | Page Table |
917 *	+-------------+------+-----+----------+-------------+------------+
918 *                                     NBPG       USPACE    (1+nkpde)*NBPG
919 *                                                (= 4*NBPG)
920 *	Build initial page tables
921 */
922	pagedir = (void *)avail;
923	pagetab = (void *)(avail + SYSMAP);
924
925	/*
926	 *	Construct a page table directory
	 *	The SH3 hardware does not support a page table directory;
	 *	this structure is used by software only.
929	 */
930	pte = (pt_entry_t)pagetab;
931	pte |= PG_KW | PG_V | PG_4K | PG_M | PG_N;
932	pagedir[KERNTEXTOFF >> PDSHIFT] = pte;
933
	/*
	 * make pde for 0xd0000000, 0xd0400000, 0xd0800000, 0xd0c00000,
	 * 0xd1000000, 0xd1400000, 0xd1800000, 0xd1c00000
	 */
936	pte += NBPG;
937	for (x = 0; x < nkpde; x++) {
938		pagedir[(VADDRSTART >> PDSHIFT) + x] = pte;
939		pte += NBPG;
940	}
941
942	/* Install a PDE recursively mapping page directory as a page table! */
943	pte = (u_int)pagedir;
944	pte |= PG_V | PG_4K | PG_KW | PG_M | PG_N;
945	pagedir[PDSLOT_PTE] = pte;
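	/*
	 * With this self-reference installed, the page tables appear as a
	 * linear array of PTEs through the virtual window at PDSLOT_PTE,
	 * which is what the pmap code uses to edit mappings once
	 * translation is enabled.
	 */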
946
947	/* set PageDirReg */
948	SHREG_TTB = (u_int)pagedir;
949
950	/* Set TLB miss handler */
951	p = tlbmisshandler_stub;
952	x = tlbmisshandler_stub_end - p;
953	bcopy(p, TLBVECTOR, x);
954
955	/*
956	 * Activate MMU
957	 */
958
#define MMUCR_AT	0x0001	/* address translation enable */
960#define MMUCR_IX	0x0002	/* index mode */
961#define MMUCR_TF	0x0004	/* TLB flush */
962#define MMUCR_SV	0x0100	/* single virtual space mode */
963
964	SHREG_MMUCR = MMUCR_AT | MMUCR_TF | MMUCR_SV;
965
966	/*
967	 * Now here is virtual address
968	 */
969
970	/* Set proc0paddr */
971	proc0paddr = (void *)(avail + NBPG);
972
973	/* Set pcb->PageDirReg of proc0 */
974	proc0paddr->u_pcb.pageDirReg = (int)pagedir;
975
976	/* avail_start is first available physical memory address */
977	avail_start = avail + NBPG + USPACE + NBPG + NBPG * nkpde;
978
979	/* atdevbase is first available logical memory address */
980	atdevbase = VADDRSTART;
981
982	proc0.p_addr = proc0paddr; /* page dir address */
983
984	/* XXX: PMAP_NEW requires valid curpcb.   also init'd in cpu_startup */
985	curpcb = &proc0.p_addr->u_pcb;
986
987	/*
988	 * Initialize the I/O port and I/O mem extent maps.
989	 * Note: we don't have to check the return value since
990	 * creation of a fixed extent map will never fail (since
991	 * descriptor storage has already been allocated).
992	 *
993	 * N.B. The iomem extent manages _all_ physical addresses
994	 * on the machine.  When the amount of RAM is found, the two
995	 * extents of RAM are allocated from the map (0 -> ISA hole
996	 * and end of ISA hole -> end of RAM).
997	 */
998	iomem_ex = extent_create("iomem", 0x0, 0xffffffff, M_DEVBUF,
999	    (caddr_t)iomem_ex_storage, sizeof(iomem_ex_storage),
1000	    EX_NOCOALESCE|EX_NOWAIT);
1001
1002#if 0
1003	consinit();	/* XXX SHOULD NOT BE DONE HERE */
1004#endif
1005
1006	splraise(-1);
1007	enable_intr();
1008
1009	avail_end = sh3_trunc_page(IOM_RAM_END + 1);
1010
1011	printf("initSH3\r\n");
1012
1013	/*
1014	 * Calculate check sum
1015	 */
1016    {
1017	u_short *p, sum;
1018	int size;
1019
1020	size = etext - _start;
1021	p = (u_short *)_start;
1022	sum = 0;
1023	size >>= 1;
1024	while (size--)
1025		sum += *p++;
1026	printf("Check Sum = 0x%x\r\n", sum);
1027    }
1028	/*
1029	 * Allocate the physical addresses used by RAM from the iomem
1030	 * extent map.  This is done before the addresses are
1031	 * page rounded just to make sure we get them all.
1032	 */
1033	if (extent_alloc_region(iomem_ex, IOM_RAM_BEGIN,
1034				(IOM_RAM_END-IOM_RAM_BEGIN) + 1,
1035				EX_NOWAIT)) {
1036		/* XXX What should we do? */
1037		printf("WARNING: CAN'T ALLOCATE RAM MEMORY FROM IOMEM EXTENT MAP!\n");
1038	}
1039
1040	/* number of pages of physmem addr space */
1041	physmem = btoc(IOM_RAM_END - IOM_RAM_BEGIN +1);
1042#ifdef	TODO
1043	dumpmem = physmem;
1044#endif
1045
1046	/*
1047	 * Initialize for pmap_free_pages and pmap_next_page.
1048	 * These guys should be page-aligned.
1049	 */
1050	if (physmem < btoc(2 * 1024 * 1024)) {
1051		printf("warning: too little memory available; "
1052		       "have %d bytes, want %d bytes\n"
1053		       "running in degraded mode\n"
1054		       "press a key to confirm\n\n",
1055		       ctob(physmem), 2*1024*1024);
1056		cngetc();
1057	}
1058
1059	/* Call pmap initialization to make new kernel address space */
1060	pmap_bootstrap(atdevbase);
1061
1062	/*
1063	 * Initialize error message buffer (at end of core).
1064	 */
1065	initmsgbuf((caddr_t)msgbuf_paddr, round_page(MSGBUFSIZE));
1066
1067	/*
1068	 * set boot device information
1069	 */
1070	setup_bootinfo();
1071
1072#if 0
1073	sh3_cache_on();
1074#endif
1075
1076	/* setup proc0 stack */
1077	sp = avail + NBPG + USPACE - 16 - sizeof(struct trapframe);
1078
1079	/*
1080	 * XXX We can't return here, because we change stack pointer.
1081	 *     So jump to return address directly.
1082	 */
1083	__asm __volatile ("jmp @%0; mov %1, r15" :: "r"(pc), "r"(sp));
1084}
1085
1086struct queue {
1087	struct queue *q_next, *q_prev;
1088};
1089
1090/*
1091 * insert an element into a queue
1092 */
1093void
1094_insque(v1, v2)
1095	void *v1;
1096	void *v2;
1097{
1098	struct queue *elem = v1, *head = v2;
1099	struct queue *next;
1100
1101	next = head->q_next;
1102	elem->q_next = next;
1103	head->q_next = elem;
1104	elem->q_prev = head;
1105	next->q_prev = elem;
1106}
1107
1108/*
1109 * remove an element from a queue
1110 */
1111void
1112_remque(v)
1113	void *v;
1114{
1115	struct queue *elem = v;
1116	struct queue *next, *prev;
1117
1118	next = elem->q_next;
1119	prev = elem->q_prev;
1120	next->q_prev = prev;
1121	prev->q_next = next;
1122	elem->q_prev = 0;
1123}
1124
1125#ifdef COMPAT_NOMID
1126static int
1127exec_nomid(p, epp)
1128	struct proc *p;
1129	struct exec_package *epp;
1130{
1131	int error;
1132	u_long midmag, magic;
1133	u_short mid;
1134	struct exec *execp = epp->ep_hdr;
1135
	/* check on validity of epp->ep_hdr performed by exec_aout_makecmds */
1137
1138	midmag = ntohl(execp->a_midmag);
1139	mid = (midmag >> 16) & 0xffff;
1140	magic = midmag & 0xffff;
1141
1142	if (magic == 0) {
1143		magic = (execp->a_midmag & 0xffff);
1144		mid = MID_ZERO;
1145	}
1146
1147	midmag = mid << 16 | magic;
1148
1149	switch (midmag) {
1150	case (MID_ZERO << 16) | ZMAGIC:
1151		/*
1152		 * 386BSD's ZMAGIC format:
1153		 */
1154		error = exec_aout_prep_oldzmagic(p, epp);
1155		break;
1156
1157	case (MID_ZERO << 16) | QMAGIC:
1158		/*
1159		 * BSDI's QMAGIC format:
1160		 * same as new ZMAGIC format, but with different magic number
1161		 */
1162		error = exec_aout_prep_zmagic(p, epp);
1163		break;
1164
1165	case (MID_ZERO << 16) | NMAGIC:
1166		/*
1167		 * BSDI's NMAGIC format:
1168		 * same as NMAGIC format, but with different magic number
1169		 * and with text starting at 0.
1170		 */
1171		error = exec_aout_prep_oldnmagic(p, epp);
1172		break;
1173
1174	case (MID_ZERO << 16) | OMAGIC:
1175		/*
1176		 * BSDI's OMAGIC format:
1177		 * same as OMAGIC format, but with different magic number
1178		 * and with text starting at 0.
1179		 */
1180		error = exec_aout_prep_oldomagic(p, epp);
1181		break;
1182
1183	default:
1184		error = ENOEXEC;
1185	}
1186
1187	return error;
1188}
1189#endif
1190
1191/*
1192 * cpu_exec_aout_makecmds():
1193 *	cpu-dependent a.out format hook for execve().
1194 *
 * Determine whether the given exec package refers to something which we
1196 * understand and, if so, set up the vmcmds for it.
1197 *
 * Old (386bsd) ZMAGIC binaries and BSDI QMAGIC binaries are accepted
 * if COMPAT_NOMID is given as a kernel option.
1200 */
1201int
1202cpu_exec_aout_makecmds(p, epp)
1203	struct proc *p;
1204	struct exec_package *epp;
1205{
1206	int error = ENOEXEC;
1207
1208#ifdef COMPAT_NOMID
1209	if ((error = exec_nomid(p, epp)) == 0)
1210		return error;
#endif /* COMPAT_NOMID */
1212
1213	return error;
1214}
1215
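/*
 * Boot information lives in the bootinfo[] array: a leading int gives the
 * number of records, and each record starts with a struct btinfo_common
 * header carrying its length and type.  setup_bootinfo() writes a single
 * BTINFO_BOOTDISK record; lookup_bootinfo() walks the records by type.
 */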
1216void
1217setup_bootinfo(void)
1218{
1219	struct btinfo_bootdisk *help;
1220
1221	*(int *)bootinfo = 1;
1222	help = (struct btinfo_bootdisk *)(bootinfo + sizeof(int));
1223	help->biosdev = 0;
1224	help->partition = 0;
1225	((struct btinfo_common *)help)->len = sizeof(struct btinfo_bootdisk);
1226	((struct btinfo_common *)help)->type = BTINFO_BOOTDISK;
1227}
1228
1229void *
1230lookup_bootinfo(type)
1231	int type;
1232{
1233	struct btinfo_common *help;
1234	int n = *(int*)bootinfo;
1235	help = (struct btinfo_common *)(bootinfo + sizeof(int));
1236	while (n--) {
1237		if (help->type == type)
1238			return (help);
1239		help = (struct btinfo_common *)((char*)help + help->len);
1240	}
1241	return (0);
1242}
1243
1244
1245/*
1246 * consinit:
1247 * initialize the system console.
1248 * XXX - shouldn't deal with this initted thing, but then,
 * it shouldn't be called from initSH3 either.
1250 */
1251void
1252consinit()
1253{
1254	static int initted;
1255
1256	if (initted)
1257		return;
1258	initted = 1;
1259
1260	cninit();
1261
1262#ifdef DDB
1263	ddb_init();
1264#endif
1265}
1266
1267void
1268cpu_reset()
1269{
1270
1271	disable_intr();
1272
1273	Sh3Reset();
1274	for (;;)
1275		;
1276}
1277
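/*
 * Bus space on this machine is flat: a bus_space handle is simply the bus
 * address itself.  The map/subregion/alloc routines below therefore reduce
 * to assignments, and unmap/free have nothing to undo.
 */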
1278int
1279bus_space_map (t, addr, size, flags, bshp)
1280	bus_space_tag_t t;
1281	bus_addr_t addr;
1282	bus_size_t size;
1283	int flags;
1284	bus_space_handle_t *bshp;
1285{
1286
1287	*bshp = (bus_space_handle_t)addr;
1288
1289	return 0;
1290}
1291
1292int
1293sh_memio_subregion(t, bsh, offset, size, nbshp)
1294	bus_space_tag_t t;
1295	bus_space_handle_t bsh;
1296	bus_size_t offset, size;
1297	bus_space_handle_t *nbshp;
1298{
1299
1300	*nbshp = bsh + offset;
1301	return (0);
1302}
1303
1304int
1305sh_memio_alloc(t, rstart, rend, size, alignment, boundary, flags,
1306	       bpap, bshp)
1307	bus_space_tag_t t;
1308	bus_addr_t rstart, rend;
1309	bus_size_t size, alignment, boundary;
1310	int flags;
1311	bus_addr_t *bpap;
1312	bus_space_handle_t *bshp;
1313{
1314	*bshp = *bpap = rstart;
1315
1316	return (0);
1317}
1318
1319void
1320sh_memio_free(t, bsh, size)
1321	bus_space_tag_t t;
1322	bus_space_handle_t bsh;
1323	bus_size_t size;
1324{
1325
1326}
1327
1328void
1329sh_memio_unmap(t, bsh, size)
1330	bus_space_tag_t t;
1331	bus_space_handle_t bsh;
1332	bus_size_t size;
1333{
1334	return;
1335}
1336
1337/*
1338 * InitializeBsc
 * : BSC (Bus State Controller)
1340 */
1341void InitializeBsc __P((void));
1342
1343void
1344InitializeBsc()
1345{
1346
1347	/*
1348	 * Drive RAS,CAS in stand by mode and bus release mode
1349	 * Area0 = Normal memory, Area5,6=Normal(no burst)
1350	 * Area2 = Normal memory, Area3 = SDRAM, Area5 = Normal memory
1351	 * Area4 = Normal Memory
1352	 * Area6 = Normal memory
1353	 */
1354	SHREG_BCR1 = BSC_BCR1_VAL;
1355
1356	/*
1357	 * Bus Width
1358	 * Area4: Bus width = 16bit
1359	 * Area6,5 = 16bit
1360	 * Area1 = 8bit
1361	 * Area2,3: Bus width = 32bit
1362	 */
	SHREG_BCR2 = BSC_BCR2_VAL;
1364
1365	/*
1366	 * Idle cycle number in transition area and read to write
1367	 * Area6 = 3, Area5 = 3, Area4 = 3, Area3 = 3, Area2 = 3
1368	 * Area1 = 3, Area0 = 3
1369	 */
1370	SHREG_WCR1 = BSC_WCR1_VAL;
1371
1372	/*
1373	 * Wait cycle
1374	 * Area 6 = 6
1375	 * Area 5 = 2
1376	 * Area 4 = 10
1377	 * Area 3 = 3
1378	 * Area 2,1 = 3
1379	 * Area 0 = 6
1380	 */
1381	SHREG_WCR2 = BSC_WCR2_VAL;
1382
1383#ifdef SH4
1384	SHREG_WCR3 = BSC_WCR3_VAL;
1385#endif
1386
1387	/*
1388	 * RAS pre-charge = 2cycle, RAS-CAS delay = 3 cycle,
1389	 * write pre-charge=1cycle
1390	 * CAS before RAS refresh RAS assert time = 3 cycle
1391	 * Disable burst, Bus size=32bit, Column Address=10bit, Refresh ON
1392	 * CAS before RAS refresh ON, EDO DRAM
1393	 */
1394	SHREG_MCR = BSC_MCR_VAL;
1395
1396#ifdef BSC_SDMR_VAL
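	/*
	 * SDRAM mode register setup: on the SH BSC the mode value is
	 * conveyed by the address of a dummy write (BSC_SDMR_VAL here),
	 * not by the data, so storing 0 is sufficient.
	 */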
1397#if 1
1398#define SDMR	(*(volatile unsigned char  *)BSC_SDMR_VAL)
1399
1400	SDMR = 0;
1401#else
1402#define ADDSET	(*(volatile unsigned short *)0x1A000000)
1403#define ADDRST	(*(volatile unsigned short *)0x18000000)
1404#define SDMR	(*(volatile unsigned char  *)BSC_SDMR_VAL)
1405
1406	ADDSET = 0;
1407	SDMR = 0;
1408	ADDRST = 0;
1409#endif
1410#endif
1411
1412	/*
1413	 * PCMCIA Control Register
1414	 * OE/WE assert delay 3.5 cycle
1415	 * OE/WE negate-address delay 3.5 cycle
1416	 */
1417#ifdef BSC_PCR_VAL
1418	SHREG_PCR = 0x00ff;
1419#endif
1420
1421	/*
1422	 * Refresh Timer Control/Status Register
	 * Disable CMF interrupt, clock = 1/16, disable OVF interrupt
	 * Count Limit = 1024
	 * In the statements below the high byte is 0xa5 (0xa4 for RFCR)
	 * because the SH3 requires that value when writing these registers.
1427	 */
1428	SHREG_RTCSR = BSC_RTCSR_VAL;
1429
1430
1431	/*
1432	 * Refresh Timer Counter
1433	 * Initialize to 0
1434	 */
1435	SHREG_RTCNT = BSC_RTCNT_VAL;
1436
1437	/* set Refresh Time Constant Register */
1438	SHREG_RTCOR = BSC_RTCOR_VAL;
1439
1440	/* init Refresh Count Register */
1441#ifdef BSC_RFCR_VAL
1442	SHREG_RFCR = BSC_RFCR_VAL;
1443#endif
1444
1445	/* Set Clock mode (make internal clock double speed) */
1446
1447	SHREG_FRQCR = FRQCR_VAL;
1448
1449#ifndef MMEYE_NO_CACHE
1450	/* Cache ON */
1451	SHREG_CCR = 0x0001;
1452#endif
1453}
1454
1455void
1456sh3_cache_on(void)
1457{
1458#ifndef MMEYE_NO_CACHE
1459	/* Cache ON */
1460	SHREG_CCR = 0x0001;
1461	SHREG_CCR = 0x0009; /* cache clear */
1462	SHREG_CCR = 0x0001; /* cache on */
1463#endif
1464}
1465
1466#include <machine/mmeye.h>
1467void
1468LoadAndReset(osimage)
1469	char *osimage;
1470{
1471	void *buf_addr;
1472	u_long size;
1473	u_long *src;
1474	u_long *dest;
1475	u_long csum = 0;
1476	u_long csum2 = 0;
1477	u_long size2;
1478#define OSIMAGE_BUF_ADDR 0x8c400000 /* !!!!!! This value depends on physical
1479				       available memory */
1480
1481
1482	printf("LoadAndReset: copy start\n");
1483	buf_addr = (void *)OSIMAGE_BUF_ADDR;
1484
1485	size = *(u_long *)osimage;
1486	src = (u_long *)osimage;
1487	dest = buf_addr;
1488
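	/*
	 * Round the copy up to whole 32-bit words; the extra
	 * sizeof(u_long) * 2 presumably covers a two-word header (the size
	 * field itself plus one more word) at the front of the image.
	 */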
1489	size = (size + sizeof(u_long) * 2 + 3) >> 2;
1490	size2 = size;
1491
1492	while (size--) {
1493		csum += *src;
1494		*dest++ = *src++;
1495	}
1496
1497	dest = buf_addr;
1498	while (size2--)
1499		csum2 += *dest++;
1500
1501	printf("LoadAndReset: copy end[%lx,%lx]\n", csum, csum2);
1502	printf("start XLoadAndReset\n");
1503
	/* mask all external interrupts (XXX) */
1505
1506	XLoadAndReset(buf_addr);
1507}
1508