/*	$NetBSD: machdep.c,v 1.3 1999/09/16 21:23:40 msaitoh Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include "opt_compat_netbsd.h"
#include "opt_ddb.h"
#include "opt_memsize.h"
#include "opt_initbsc.h"
#include "opt_sysv.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/map.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/exec.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/device.h>
#include <sys/extent.h>
#include <sys/syscallargs.h>

#ifdef KGDB
#include <sys/kgdb.h>
#endif

#include <dev/cons.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <uvm/uvm_extern.h>

#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/psl.h>
#include <machine/bootinfo.h>
#include <machine/bus.h>
#include <sh3/bscreg.h>
#include <sh3/ccrreg.h>
#include <sh3/cpgreg.h>
#include <sh3/intcreg.h>
#include <sh3/pfcreg.h>
#include <sh3/wdtreg.h>

#include <sys/termios.h>
#include "sci.h"

/* the following is used externally (sysctl_hw) */
char machine[] = MACHINE;		/* cpu "architecture" */
char machine_arch[] = MACHINE_ARCH;	/* machine_arch = "sh3" */

#ifdef sh3_debug
int cpu_debug_mode = 1;
#else
int cpu_debug_mode = 0;
#endif

char cpu_model[120];

char bootinfo[BOOTINFO_MAXSIZE];

int physmem;
int dumpmem_low;
int dumpmem_high;
vaddr_t atdevbase;	/* location of start of iomem in virtual */
paddr_t msgbuf_paddr;
struct user *proc0paddr;

vm_map_t exec_map = NULL;
vm_map_t mb_map = NULL;
vm_map_t phys_map = NULL;

extern int boothowto;
extern paddr_t avail_start, avail_end;

#ifdef	SYSCALL_DEBUG
#define	SCDEBUG_ALL 0x0004
extern int	scdebug;
#endif

#define IOM_RAM_END	((paddr_t)IOM_RAM_BEGIN + IOM_RAM_SIZE - 1)

/*
 * Extent maps to manage I/O and ISA memory hole space.  Allocate
 * storage for 8 regions in each, initially.  Later, ioport_malloc_safe
 * will indicate that it's safe to use malloc() to dynamically allocate
 * region descriptors.
 *
 * N.B. At least two regions are _always_ allocated from the iomem
 * extent map; (0 -> ISA hole) and (end of ISA hole -> end of RAM).
 *
 * The extent maps are not static!  Machine-dependent ISA and EISA
 * routines need access to them for bus address space allocation.
 */
static	long iomem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
struct	extent *ioport_ex;
struct	extent *iomem_ex;
static	int ioport_malloc_safe;

void setup_bootinfo __P((void));
void dumpsys __P((void));
void identifycpu __P((void));
void initSH3 __P((void *));
void InitializeSci __P((unsigned char));
void sh3_cache_on __P((void));
void LoadAndReset __P((char *));
void XLoadAndReset __P((char *));
void Sh3Reset __P((void));

#include <dev/ic/comreg.h>
#include <dev/ic/comvar.h>

void	consinit __P((void));

#ifdef COMPAT_NOMID
static int exec_nomid	__P((struct proc *, struct exec_package *));
#endif

/*
 * Machine-dependent startup code
 *
 * This is called from main() in kern/main.c.
 */
void
cpu_startup()
{
	unsigned i;
	caddr_t v;
	int sz;
	int base, residual;
	vaddr_t minaddr, maxaddr;
	vsize_t size;
	struct pcb *pcb;
	char pbuf[9];

	printf(version);

	sprintf(cpu_model, "Hitachi SH3");

	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	sz = (int)allocsys(NULL, NULL);
	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v, NULL) - v != sz)
		panic("startup: table size inconsistency");

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	size = MAXBSIZE * nbuf;
	buffers = 0;
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
		    NULL, UVM_UNKNOWN_OFFSET,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
				UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
		panic("cpu_startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;
	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		/* don't want to alloc more physical mem than needed */
		bufpages = btoc(MAXBSIZE) * nbuf;
	}

	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
		curbufsize = CLBYTES * ((i < residual) ? (base+1) : base);

		while (curbufsize) {
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("cpu_startup: not enough memory for "
				    "buffer cache");
			pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
					VM_PROT_READ|VM_PROT_WRITE);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   16*NCARGS, TRUE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   VM_PHYS_SIZE, TRUE, FALSE, NULL);

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_MBUF_SIZE, FALSE, FALSE, NULL);

	/*
	 * Initialize callouts
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * CLBYTES);
	printf("using %d buffers containing %s of memory\n", nbuf, pbuf);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/* Safe for i/o port allocation to use malloc now. */
	ioport_malloc_safe = 1;

	curpcb = pcb = &proc0.p_addr->u_pcb;
	pcb->r15 = (int)proc0.p_addr + USPACE - 16;

	proc0.p_md.md_regs = (struct trapframe *)pcb->r15 - 1;

#ifdef SYSCALL_DEBUG
	scdebug |= SCDEBUG_ALL;
#endif

#ifdef FORCE_RB_SINGLE
	boothowto |= RB_SINGLE;
#endif
}

/*
 * Info for CTL_HW
 */
extern	char version[];

#define CPUDEBUG

/*
 * machine dependent system variables.
 */
int
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	dev_t consdev;
	struct btinfo_bootpath *bibp;
	struct trapframe *tf;
	char *osimage;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case CPU_CONSDEV:
		if (cn_tab != NULL)
			consdev = cn_tab->cn_dev;
		else
			consdev = NODEV;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
		    sizeof consdev));

	case CPU_NKPDE:
		return (sysctl_rdint(oldp, oldlenp, newp, nkpde));

	case CPU_BOOTED_KERNEL:
		bibp = lookup_bootinfo(BTINFO_BOOTPATH);
		if (!bibp)
			return (ENOENT); /* ??? */
		return (sysctl_rdstring(oldp, oldlenp, newp, bibp->bootpath));

	case CPU_SETPRIVPROC:
		if (newp == NULL)
			return (0);

		/* set current process to privileged process */
		tf = p->p_md.md_regs;
		tf->tf_ssr |= PSL_MD;
		return (0);

	case CPU_DEBUGMODE:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
				   &cpu_debug_mode));

	case CPU_LOADANDRESET:
		if (newp != NULL) {
			osimage = (char *)(*(u_long *)newp);

			LoadAndReset(osimage);
			/* NOTREACHED */
		}
		return (0);

	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct proc *p = curproc;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp = p->p_sigacts;
	int onstack;

	tf = p->p_md.md_regs;

	/* Do we need to jump onto the signal stack? */
	onstack =
	    (psp->ps_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (psp->ps_sigact[sig].sa_flags & SA_ONSTACK) != 0;

	/* Allocate space for the signal handler context. */
	if (onstack)
		fp = (struct sigframe *)((caddr_t)psp->ps_sigstk.ss_sp +
						  psp->ps_sigstk.ss_size);
	else
		fp = (struct sigframe *)tf->tf_r15;
	fp--;

	/* Build stack frame for signal trampoline. */
	frame.sf_signum = sig;
	frame.sf_code = code;
	frame.sf_scp = &fp->sf_sc;
	frame.sf_handler = catcher;

	/* Save register context. */
	frame.sf_sc.sc_ssr = tf->tf_ssr;
	frame.sf_sc.sc_spc = tf->tf_spc;
	frame.sf_sc.sc_pr = tf->tf_pr;
	frame.sf_sc.sc_r15 = tf->tf_r15;
	frame.sf_sc.sc_r14 = tf->tf_r14;
	frame.sf_sc.sc_r13 = tf->tf_r13;
	frame.sf_sc.sc_r12 = tf->tf_r12;
	frame.sf_sc.sc_r11 = tf->tf_r11;
	frame.sf_sc.sc_r10 = tf->tf_r10;
	frame.sf_sc.sc_r9 = tf->tf_r9;
	frame.sf_sc.sc_r8 = tf->tf_r8;
	frame.sf_sc.sc_r7 = tf->tf_r7;
	frame.sf_sc.sc_r6 = tf->tf_r6;
	frame.sf_sc.sc_r5 = tf->tf_r5;
	frame.sf_sc.sc_r4 = tf->tf_r4;
	frame.sf_sc.sc_r3 = tf->tf_r3;
	frame.sf_sc.sc_r2 = tf->tf_r2;
	frame.sf_sc.sc_r1 = tf->tf_r1;
	frame.sf_sc.sc_r0 = tf->tf_r0;
	frame.sf_sc.sc_trapno = tf->tf_trapno;
#ifdef TODO
	frame.sf_sc.sc_err = tf->tf_err;
#endif

	/* Save signal stack. */
	frame.sf_sc.sc_onstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;

	/* Save signal mask. */
	frame.sf_sc.sc_mask = *mask;

#ifdef COMPAT_13
	/*
	 * XXX We always have to save an old style signal mask because
	 * XXX we might be delivering a signal to a process which will
	 * XXX escape from the signal in a non-standard way and invoke
	 * XXX sigreturn() directly.
	 */
	native_sigset_to_sigset13(mask, &frame.sf_sc.__sc_mask13);
#endif

	if (copyout(&frame, fp, sizeof(frame)) != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Build context to run handler in.
	 */
	tf->tf_spc = (int)psp->ps_sigcode;
#ifdef TODO
	tf->tf_ssr &= ~(PSL_T|PSL_VM|PSL_AC);
#endif
	tf->tf_r15 = (int)fp;

	/* Remember that we're now on the signal stack. */
	if (onstack)
		psp->ps_sigstk.ss_flags |= SS_ONSTACK;
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
int
sys___sigreturn14(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys___sigreturn14_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct sigcontext *scp, context;
	struct trapframe *tf;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sigcntxp);
	if (copyin((caddr_t)scp, &context, sizeof(*scp)) != 0)
		return (EFAULT);

	/* Restore signal context. */
	tf = p->p_md.md_regs;
	{
		/*
		 * Check for security violations.  If we're returning to
		 * protected mode, the CPU will validate the segment registers
		 * automatically and generate a trap on violations.  We handle
		 * the trap, rather than doing all of the checking here.
		 */
#ifdef TODO
		if (((context.sc_ssr ^ tf->tf_ssr) & PSL_USERSTATIC) != 0) {
			return (EINVAL);
		}
#endif

		tf->tf_ssr = context.sc_ssr;
	}
	tf->tf_r0 = context.sc_r0;
	tf->tf_r1 = context.sc_r1;
	tf->tf_r2 = context.sc_r2;
	tf->tf_r3 = context.sc_r3;
	tf->tf_r4 = context.sc_r4;
	tf->tf_r5 = context.sc_r5;
	tf->tf_r6 = context.sc_r6;
	tf->tf_r7 = context.sc_r7;
	tf->tf_r8 = context.sc_r8;
	tf->tf_r9 = context.sc_r9;
	tf->tf_r10 = context.sc_r10;
	tf->tf_r11 = context.sc_r11;
	tf->tf_r12 = context.sc_r12;
	tf->tf_r13 = context.sc_r13;
	tf->tf_r14 = context.sc_r14;
	tf->tf_spc = context.sc_spc;
	tf->tf_r15 = context.sc_r15;
	tf->tf_pr = context.sc_pr;

	/* Restore signal stack. */
	if (context.sc_onstack & SS_ONSTACK)
		p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK;
	/* Restore signal mask. */
	(void) sigprocmask1(p, SIG_SETMASK, &context.sc_mask, 0);

	return (EJUSTRETURN);
}

int waittime = -1;
int cold = 1;
struct pcb dumppcb;

void
cpu_reboot(howto, bootstr)
	int howto;
	char *bootstr;
{

	if (cold) {
		howto |= RB_HALT;
		goto haltsys;
	}

	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;
		vfs_shutdown();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		/* resettodr(); */
	}

	/* Disable interrupts. */
	splhigh();

	/* Do a dump if requested. */
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
		dumpsys();

haltsys:
	doshutdownhooks();

	if (howto & RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
	}

	printf("rebooting...\n");
	cpu_reset();
	for (;;)
		;
	/* NOTREACHED */
}

/*
 * These variables are needed by /sbin/savecore
 */
u_long	dumpmag = 0x8fca0101;	/* magic number */
int	dumpsize = 0;		/* pages */
long	dumplo = 0;		/* blocks */

/*
 * This is called by main to set dumplo and dumpsize.
 * Dumps always skip the first CLBYTES of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
void
cpu_dumpconf()
{
#ifdef	TODO
	int nblks;	/* size of dump area */
	int maj;

	if (dumpdev == NODEV)
		return;
	maj = major(dumpdev);
	if (maj < 0 || maj >= nblkdev)
		panic("dumpconf: bad dumpdev=0x%x", dumpdev);
	if (bdevsw[maj].d_psize == NULL)
		return;
	nblks = (*bdevsw[maj].d_psize)(dumpdev);
	if (nblks <= ctod(1))
		return;

	dumpsize = btoc(IOM_END + ctob(dumpmem_high));

	/* Always skip the first CLBYTES, in case there is a label there. */
	if (dumplo < ctod(1))
		dumplo = ctod(1);

	/* Put dump at end of partition, and make it fit. */
	if (dumpsize > dtoc(nblks - dumplo))
		dumpsize = dtoc(nblks - dumplo);
	if (dumplo < nblks - ctod(dumpsize))
		dumplo = nblks - ctod(dumpsize);
#endif
}

/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.
 */
#define BYTES_PER_DUMP	NBPG	/* must be a multiple of pagesize XXX small */
static vaddr_t dumpspace;

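/*
 * Reserve BYTES_PER_DUMP bytes of kernel virtual address space at "p";
 * dumpsys() maps physical memory through this window while writing the dump.
 */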
vaddr_t
reserve_dumppages(p)
	vaddr_t p;
{

	dumpspace = p;
	return (p + BYTES_PER_DUMP);
}

void
dumpsys()
{
#ifdef	TODO
	unsigned bytes, i, n;
	int maddr, psize;
	daddr_t blkno;
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	int error;

	/* Save registers. */
	savectx(&dumppcb);

	msgbufmapped = 0;	/* don't record dump msgs in msgbuf */
	if (dumpdev == NODEV)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo < 0)
		return;
	printf("\ndumping to dev %x, offset %ld\n", dumpdev, dumplo);

	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}

#if 0	/* XXX this doesn't work.  grr. */
	/* toss any characters present prior to dump */
	while (sget() != NULL); /* syscons and pccons differ */
#endif

	bytes = ctob(dumpmem_high) + IOM_END;
	maddr = 0;
	blkno = dumplo;
	dump = bdevsw[major(dumpdev)].d_dump;
	error = 0;
	for (i = 0; i < bytes; i += n) {
		/*
		 * Avoid dumping the ISA memory hole, and areas that
		 * BIOS claims aren't in low memory.
		 */
		if (i >= ctob(dumpmem_low) && i < IOM_END) {
			n = IOM_END - i;
			maddr += n;
			blkno += btodb(n);
			continue;
		}

		/* Print out how many MBs we have to go. */
		n = bytes - i;
		if (n && (n % (1024*1024)) == 0)
			printf("%d ", n / (1024 * 1024));

		/* Limit size for next transfer. */
		if (n > BYTES_PER_DUMP)
			n = BYTES_PER_DUMP;

		(void) pmap_map(dumpspace, maddr, maddr + n, VM_PROT_READ);
		error = (*dump)(dumpdev, blkno, (caddr_t)dumpspace, n);
		if (error)
			break;
		maddr += n;
		blkno += btodb(n);			/* XXX? */

#if 0	/* XXX this doesn't work.  grr. */
		/* operator aborting dump? */
		if (sget() != NULL) {
			error = EINTR;
			break;
		}
#endif
	}

	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case EINTR:
		printf("aborted from console\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
	printf("\n\n");
	delay(5000000);		/* 5 seconds */
#endif	/* TODO */
}

/*
 * Clear registers on exec
 */
void
setregs(p, pack, stack)
	struct proc *p;
	struct exec_package *pack;
	u_long stack;
{
	register struct pcb *pcb = &p->p_addr->u_pcb;
	register struct trapframe *tf;

	p->p_md.md_flags &= ~MDP_USEDFPU;
	pcb->pcb_flags = 0;

	tf = p->p_md.md_regs;

	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	tf->tf_r2 = 0;
	tf->tf_r3 = 0;
	tf->tf_r4 = *(int *)stack;	/* argc */
	tf->tf_r5 = stack+4;		/* argv */
	tf->tf_r6 = stack+4*tf->tf_r4 + 8; /* envp */
	tf->tf_r7 = 0;
	tf->tf_r8 = 0;
	tf->tf_r9 = 0;
	tf->tf_r10 = 0;
	tf->tf_r11 = 0;
	tf->tf_r12 = 0;
	tf->tf_r13 = 0;
	tf->tf_r14 = 0;
	tf->tf_spc = pack->ep_entry;
	tf->tf_ssr = PSL_USERSET;
	tf->tf_r15 = stack;
#ifdef TODO
	tf->tf_r9 = (int)PS_STRINGS;
#endif
}

/*
 * Machine-dependent bootstrap: set up trap vectors, the initial kernel
 * page tables and the MMU.
 */
#define VBRINIT		((char *)0x8c000000)
#define Trap100Vec	(VBRINIT + 0x100)
#define Trap600Vec	(VBRINIT + 0x600)
#define TLBVECTOR	(VBRINIT + 0x400)
#define VADDRSTART	VM_MIN_KERNEL_ADDRESS

extern int nkpde;
extern char MonTrap100[], MonTrap100_end[];
extern char MonTrap600[], MonTrap600_end[];
extern char _start[], etext[], edata[], end[];
extern char tlbmisshandler_stub[], tlbmisshandler_stub_end[];

void
initSH3(pc)
	void *pc;	/* XXX return address */
{
	paddr_t avail;
	pd_entry_t *pagedir;
	pt_entry_t *pagetab, pte;
	u_int sp;
	int x;
	char *p;

	avail = sh3_round_page(end);

	/*
	 * clear .bss, .common area, page dir area,
	 *	process0 stack, page table area
	 */

	p = (char *)avail + (1 + UPAGES) * NBPG + NBPG * 9;
	bzero(edata, p - edata);

	/*
	 * install trap handler
	 */
	bcopy(MonTrap100, Trap100Vec, MonTrap100_end - MonTrap100);
	bcopy(MonTrap600, Trap600Vec, MonTrap600_end - MonTrap600);
	__asm ("ldc %0, vbr" :: "r"(VBRINIT));

/*
 *                          edata  end
 *	+-------------+------+-----+----------+-------------+------------+
 *	| kernel text | data | bss | Page Dir | Proc0 Stack | Page Table |
 *	+-------------+------+-----+----------+-------------+------------+
 *                                     NBPG       USPACE        9*NBPG
 *                                                (= 4*NBPG)
 *	Build initial page tables
 */
	pagedir = (void *)avail;
	pagetab = (void *)(avail + SYSMAP);
	nkpde = 8;	/* XXX nkpde = kernel page dir area (32 Mbyte) */

	/*
	 *	Construct a page table directory.
	 *	The SH3 hardware does not support a PTD;
	 *	these structures are used by software only.
	 */
	pte = (pt_entry_t)pagetab;
	pte |= PG_KW | PG_V | PG_4K | PG_M | PG_N;
	pagedir[KERNTEXTOFF >> PDSHIFT] = pte;

	/* make pde for 0xd0000000, 0xd0400000, 0xd0800000, 0xd0c00000,
		0xd1000000, 0xd1400000, 0xd1800000, 0xd1c00000 */
	pte += NBPG;
	for (x = 0; x < nkpde; x++) {
		pagedir[(VADDRSTART >> PDSHIFT) + x] = pte;
		pte += NBPG;
	}

	/* Install a PDE recursively mapping page directory as a page table! */
	pte = (u_int)pagedir;
	pte |= PG_V | PG_4K | PG_KW | PG_M | PG_N;
	pagedir[PDSLOT_PTE] = pte;

	/* set PageDirReg */
	SHREG_TTB = (u_int)pagedir;

	/* Set TLB miss handler */
	p = tlbmisshandler_stub;
	x = tlbmisshandler_stub_end - p;
	bcopy(p, TLBVECTOR, x);

	/*
	 * Activate MMU
	 */
#ifndef ROMIMAGE
	MMEYE_LED = 1;
#endif

#define MMUCR_AT	0x0001	/* address translation enable */
#define MMUCR_IX	0x0002	/* index mode */
#define MMUCR_TF	0x0004	/* TLB flush */
#define MMUCR_SV	0x0100	/* single virtual space mode */

	SHREG_MMUCR = MMUCR_AT | MMUCR_TF | MMUCR_SV;

	/*
	 * Address translation is enabled from here on
	 */
#ifndef ROMIMAGE
	MMEYE_LED = 0;
#endif

	/* Set proc0paddr */
	proc0paddr = (void *)(avail + NBPG);

	/* Set pcb->PageDirReg of proc0 */
	proc0paddr->u_pcb.pageDirReg = (int)pagedir;

	/* avail_start is first available physical memory address */
	avail_start = avail + NBPG + USPACE + NBPG + NBPG * nkpde;

	/* atdevbase is first available logical memory address */
	atdevbase = VADDRSTART;

	proc0.p_addr = proc0paddr; /* page dir address */

	/* XXX: PMAP_NEW requires valid curpcb.   also init'd in cpu_startup */
	curpcb = &proc0.p_addr->u_pcb;

	/*
	 * Initialize the I/O port and I/O mem extent maps.
	 * Note: we don't have to check the return value since
	 * creation of a fixed extent map will never fail (since
	 * descriptor storage has already been allocated).
	 *
	 * N.B. The iomem extent manages _all_ physical addresses
	 * on the machine.  When the amount of RAM is found, the two
	 * extents of RAM are allocated from the map (0 -> ISA hole
	 * and end of ISA hole -> end of RAM).
	 */
	iomem_ex = extent_create("iomem", 0x0, 0xffffffff, M_DEVBUF,
	    (caddr_t)iomem_ex_storage, sizeof(iomem_ex_storage),
	    EX_NOCOALESCE|EX_NOWAIT);

#if 0
	consinit();	/* XXX SHOULD NOT BE DONE HERE */
#endif

	splraise(-1);
	enable_intr();

	avail_end = sh3_trunc_page(IOM_RAM_END + 1);

	printf("initSH3\r\n");

	/*
	 * Calculate check sum
	 */
    {
	u_short *p, sum;
	int size;

	size = etext - _start;
	p = (u_short *)_start;
	sum = 0;
	size >>= 1;
	while (size--)
		sum += *p++;
	printf("Check Sum = 0x%x\r\n", sum);
    }
	/*
	 * Allocate the physical addresses used by RAM from the iomem
	 * extent map.  This is done before the addresses are
	 * page rounded just to make sure we get them all.
	 */
	if (extent_alloc_region(iomem_ex, IOM_RAM_BEGIN,
				(IOM_RAM_END - IOM_RAM_BEGIN) + 1,
				EX_NOWAIT)) {
		/* XXX What should we do? */
		printf("WARNING: CAN'T ALLOCATE RAM MEMORY FROM IOMEM EXTENT MAP!\n");
	}

	/* number of pages of physmem addr space */
	physmem = btoc(IOM_RAM_END - IOM_RAM_BEGIN + 1);
#ifdef	TODO
	dumpmem = physmem;
#endif

	/*
	 * Initialize for pmap_free_pages and pmap_next_page.
	 * These guys should be page-aligned.
	 */
	if (physmem < btoc(2 * 1024 * 1024)) {
		printf("warning: too little memory available; "
		       "have %d bytes, want %d bytes\n"
		       "running in degraded mode\n"
		       "press a key to confirm\n\n",
		       ctob(physmem), 2*1024*1024);
		cngetc();
	}

	/* Call pmap initialization to make new kernel address space */
	pmap_bootstrap(atdevbase);

	/*
	 * Initialize error message buffer (at end of core).
	 */
	initmsgbuf((caddr_t)msgbuf_paddr, round_page(MSGBUFSIZE));

	/*
	 * set boot device information
	 */
	setup_bootinfo();

#if 0
	sh3_cache_on();
#endif

	/* setup proc0 stack */
	sp = avail + NBPG + USPACE - 16 - sizeof(struct trapframe);

	/*
	 * XXX We can't return here, because we change the stack pointer.
	 *     So jump to the return address directly.
	 */
	__asm __volatile ("jmp @%0; mov %1, r15" :: "r"(pc), "r"(sp));
}

struct queue {
	struct queue *q_next, *q_prev;
};

/*
 * insert an element into a queue
 */
void
_insque(v1, v2)
	void *v1;
	void *v2;
{
	struct queue *elem = v1, *head = v2;
	struct queue *next;

	next = head->q_next;
	elem->q_next = next;
	head->q_next = elem;
	elem->q_prev = head;
	next->q_prev = elem;
}

/*
 * remove an element from a queue
 */
void
_remque(v)
	void *v;
{
	struct queue *elem = v;
	struct queue *next, *prev;

	next = elem->q_next;
	prev = elem->q_prev;
	next->q_prev = prev;
	prev->q_next = next;
	elem->q_prev = 0;
}

#ifdef COMPAT_NOMID
static int
exec_nomid(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
	int error;
	u_long midmag, magic;
	u_short mid;
	struct exec *execp = epp->ep_hdr;

	/* check on validity of epp->ep_hdr performed by exec_aout_makecmds */

	midmag = ntohl(execp->a_midmag);
	mid = (midmag >> 16) & 0xffff;
	magic = midmag & 0xffff;

	if (magic == 0) {
		magic = (execp->a_midmag & 0xffff);
		mid = MID_ZERO;
	}

	midmag = mid << 16 | magic;

	switch (midmag) {
	case (MID_ZERO << 16) | ZMAGIC:
		/*
		 * 386BSD's ZMAGIC format:
		 */
		error = exec_aout_prep_oldzmagic(p, epp);
		break;

	case (MID_ZERO << 16) | QMAGIC:
		/*
		 * BSDI's QMAGIC format:
		 * same as new ZMAGIC format, but with different magic number
		 */
		error = exec_aout_prep_zmagic(p, epp);
		break;

	case (MID_ZERO << 16) | NMAGIC:
		/*
		 * BSDI's NMAGIC format:
		 * same as NMAGIC format, but with different magic number
		 * and with text starting at 0.
		 */
		error = exec_aout_prep_oldnmagic(p, epp);
		break;

	case (MID_ZERO << 16) | OMAGIC:
		/*
		 * BSDI's OMAGIC format:
		 * same as OMAGIC format, but with different magic number
		 * and with text starting at 0.
		 */
		error = exec_aout_prep_oldomagic(p, epp);
		break;

	default:
		error = ENOEXEC;
	}

	return error;
}
#endif

/*
 * cpu_exec_aout_makecmds():
 *	cpu-dependent a.out format hook for execve().
 *
 * Determine whether the given exec package refers to something which we
 * understand and, if so, set up the vmcmds for it.
 *
 * Old (386BSD) ZMAGIC binaries and BSDI QMAGIC binaries are accepted
 * if COMPAT_NOMID is given as a kernel option.
 */
int
cpu_exec_aout_makecmds(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
	int error = ENOEXEC;

#ifdef COMPAT_NOMID
	if ((error = exec_nomid(p, epp)) == 0)
		return error;
#endif /* COMPAT_NOMID */

	return error;
}

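/*
 * Fill in the bootinfo area with a single BTINFO_BOOTDISK record
 * (biosdev 0, partition 0) so that lookup_bootinfo() has something
 * to find.
 */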
void
setup_bootinfo(void)
{
	struct btinfo_bootdisk *help;

	*(int *)bootinfo = 1;
	help = (struct btinfo_bootdisk *)(bootinfo + sizeof(int));
	help->biosdev = 0;
	help->partition = 0;
	((struct btinfo_common *)help)->len = sizeof(struct btinfo_bootdisk);
	((struct btinfo_common *)help)->type = BTINFO_BOOTDISK;
}

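/*
 * Walk the bootinfo records (count in the leading int, entries packed
 * back to back after it) and return the first record of the requested
 * type, or NULL if none is present.
 */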
void *
lookup_bootinfo(type)
	int type;
{
	struct btinfo_common *help;
	int n = *(int *)bootinfo;

	help = (struct btinfo_common *)(bootinfo + sizeof(int));
	while (n--) {
		if (help->type == type)
			return (help);
		help = (struct btinfo_common *)((char *)help + help->len);
	}
	return (0);
}

/*
 * consinit:
 * initialize the system console.
 * XXX - shouldn't deal with this initted thing, but then,
 * it shouldn't be called from initSH3 either.
 */
void
consinit()
{
	static int initted;

	if (initted)
		return;
	initted = 1;

	cninit();

#ifdef DDB
	ddb_init();
#endif
}

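/*
 * Reset the machine: block interrupts, call Sh3Reset() and spin forever
 * should it ever return.
 */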
void
cpu_reset()
{

	disable_intr();

	Sh3Reset();
	for (;;)
		;
}

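/*
 * Bus space mapping is trivial on this port: the bus address itself is
 * used as the bus space handle.
 */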
int
bus_space_map(t, addr, size, flags, bshp)
	bus_space_tag_t t;
	bus_addr_t addr;
	bus_size_t size;
	int flags;
	bus_space_handle_t *bshp;
{

	*bshp = (bus_space_handle_t)addr;

	return 0;
}

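/*
 * The sh_memio_* helpers below follow the same scheme: handles are plain
 * addresses, so subregion/alloc reduce to address arithmetic and
 * unmap/free have nothing to undo.
 */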
int
sh_memio_subregion(t, bsh, offset, size, nbshp)
	bus_space_tag_t t;
	bus_space_handle_t bsh;
	bus_size_t offset, size;
	bus_space_handle_t *nbshp;
{

	*nbshp = bsh + offset;
	return (0);
}

int
sh_memio_alloc(t, rstart, rend, size, alignment, boundary, flags,
	       bpap, bshp)
	bus_space_tag_t t;
	bus_addr_t rstart, rend;
	bus_size_t size, alignment, boundary;
	int flags;
	bus_addr_t *bpap;
	bus_space_handle_t *bshp;
{
	*bshp = *bpap = rstart;

	return (0);
}

void
sh_memio_free(t, bsh, size)
	bus_space_tag_t t;
	bus_space_handle_t bsh;
	bus_size_t size;
{

}

void
sh_memio_unmap(t, bsh, size)
	bus_space_tag_t t;
	bus_space_handle_t bsh;
	bus_size_t size;
{
	return;
}

/*
 * InitializeBsc
 * : BSC (Bus State Controller)
 */
void InitializeBsc __P((void));

void
InitializeBsc()
{

	/*
	 * Drive RAS,CAS in stand by mode and bus release mode
	 * Area0 = Normal memory, Area5,6=Normal(no burst)
	 * Area2 = Normal memory, Area3 = SDRAM, Area5 = Normal memory
	 * Area4 = Normal Memory
	 * Area6 = Normal memory
	 */
	SHREG_BCR1 = BSC_BCR1_VAL;

	/*
	 * Bus Width
	 * Area4: Bus width = 16bit
	 * Area6,5 = 16bit
	 * Area1 = 8bit
	 * Area2,3: Bus width = 32bit
	 */
	SHREG_BCR2 = BSC_BCR2_VAL;

	/*
	 * Idle cycle number in transition area and read to write
	 * Area6 = 3, Area5 = 3, Area4 = 3, Area3 = 3, Area2 = 3
	 * Area1 = 3, Area0 = 3
	 */
	SHREG_WCR1 = BSC_WCR1_VAL;

	/*
	 * Wait cycle
	 * Area 6 = 6
	 * Area 5 = 2
	 * Area 4 = 10
	 * Area 3 = 3
	 * Area 2,1 = 3
	 * Area 0 = 6
	 */
	SHREG_WCR2 = BSC_WCR2_VAL;

#ifdef SH4
	SHREG_WCR3 = BSC_WCR3_VAL;
#endif

	/*
	 * RAS pre-charge = 2cycle, RAS-CAS delay = 3 cycle,
	 * write pre-charge=1cycle
	 * CAS before RAS refresh RAS assert time = 3 cycle
	 * Disable burst, Bus size=32bit, Column Address=10bit, Refresh ON
	 * CAS before RAS refresh ON, EDO DRAM
	 */
	SHREG_MCR = BSC_MCR_VAL;

#ifdef BSC_SDMR_VAL
#if 1
#define SDMR	(*(volatile unsigned char  *)BSC_SDMR_VAL)

	SDMR = 0;
#else
#define ADDSET	(*(volatile unsigned short *)0x1A000000)
#define ADDRST	(*(volatile unsigned short *)0x18000000)
#define SDMR	(*(volatile unsigned char  *)BSC_SDMR_VAL)

	ADDSET = 0;
	SDMR = 0;
	ADDRST = 0;
#endif
#endif

	/*
	 * PCMCIA Control Register
	 * OE/WE assert delay 3.5 cycle
	 * OE/WE negate-address delay 3.5 cycle
	 */
#ifdef BSC_PCR_VAL
	SHREG_PCR = 0x00ff;
#endif

	/*
	 * Refresh Timer Control/Status Register
	 * Disable interrupt by CMF, clock 1/16, disable OVF interrupt
	 * Count Limit = 1024
	 * The high byte written below is 0xa5 (0xa4 for RFCR) because the
	 * SH3 requires that pattern when writing these registers.
	 */
	SHREG_RTCSR = BSC_RTCSR_VAL;

	/*
	 * Refresh Timer Counter
	 * Initialize to 0
	 */
	SHREG_RTCNT = BSC_RTCNT_VAL;

	/* set Refresh Time Constant Register */
	SHREG_RTCOR = BSC_RTCOR_VAL;

	/* init Refresh Count Register */
#ifdef BSC_RFCR_VAL
	SHREG_RFCR = BSC_RFCR_VAL;
#endif

	/* Set Clock mode (make internal clock double speed) */
	SHREG_FRQCR = FRQCR_VAL;

#ifndef MMEYE_NO_CACHE
	/* Cache ON */
	SHREG_CCR = 0x0001;
#endif
}

void
sh3_cache_on(void)
{
#ifndef MMEYE_NO_CACHE
	/* Cache ON */
	SHREG_CCR = 0x0001;
	SHREG_CCR = 0x0009; /* cache clear */
	SHREG_CCR = 0x0001; /* cache on */
#endif
}

#include <machine/mmeye.h>
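/*
 * Copy a new OS image (its length is stored in its first word) to
 * OSIMAGE_BUF_ADDR, compute word checksums of the source and of the
 * copy as a sanity check, then hand control to XLoadAndReset() to
 * boot it.  Called from cpu_sysctl(CPU_LOADANDRESET).
 */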
void
LoadAndReset(osimage)
	char *osimage;
{
	void *buf_addr;
	u_long size;
	u_long *src;
	u_long *dest;
	u_long csum = 0;
	u_long csum2 = 0;
	u_long size2;
#define OSIMAGE_BUF_ADDR 0x8c400000 /* XXX: this value depends on the
				       amount of physical memory available */

	printf("LoadAndReset: copy start\n");
	buf_addr = (void *)OSIMAGE_BUF_ADDR;

	size = *(u_long *)osimage;
	src = (u_long *)osimage;
	dest = buf_addr;

	size = (size + sizeof(u_long) * 2 + 3) >> 2;
	size2 = size;

	while (size--) {
		csum += *src;
		*dest++ = *src++;
	}

	dest = buf_addr;
	while (size2--)
		csum2 += *dest++;

	printf("LoadAndReset: copy end[%lx,%lx]\n", csum, csum2);
	printf("start XLoadAndReset\n");

	/* mask all external interrupts (XXX) */

	XLoadAndReset(buf_addr);
}
1513