/* machdep.c revision 1.1 */
1/*	$NetBSD: machdep.c,v 1.1 1999/09/13 10:30:26 itojun Exp $	*/
2
3/*-
4 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 *    must display the following acknowledgement:
21 *	This product includes software developed by the NetBSD
22 *	Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 *    contributors may be used to endorse or promote products derived
25 *    from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40/*-
41 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
42 * All rights reserved.
43 *
44 * This code is derived from software contributed to Berkeley by
45 * William Jolitz.
46 *
47 * Redistribution and use in source and binary forms, with or without
48 * modification, are permitted provided that the following conditions
49 * are met:
50 * 1. Redistributions of source code must retain the above copyright
51 *    notice, this list of conditions and the following disclaimer.
52 * 2. Redistributions in binary form must reproduce the above copyright
53 *    notice, this list of conditions and the following disclaimer in the
54 *    documentation and/or other materials provided with the distribution.
55 * 3. All advertising materials mentioning features or use of this software
56 *    must display the following acknowledgement:
57 *	This product includes software developed by the University of
58 *	California, Berkeley and its contributors.
59 * 4. Neither the name of the University nor the names of its contributors
60 *    may be used to endorse or promote products derived from this software
61 *    without specific prior written permission.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * SUCH DAMAGE.
74 *
75 *	@(#)machdep.c	7.4 (Berkeley) 6/3/91
76 */
77
78#include "opt_bufcache.h"
79#include "opt_compat_netbsd.h"
80#include "opt_ddb.h"
81#include "opt_memsize.h"
82#include "opt_initbsc.h"
83#include "opt_sysv.h"
84
85#include <sys/param.h>
86#include <sys/systm.h>
87#include <sys/signalvar.h>
88#include <sys/kernel.h>
89#include <sys/map.h>
90#include <sys/proc.h>
91#include <sys/user.h>
92#include <sys/exec.h>
93#include <sys/buf.h>
94#include <sys/reboot.h>
95#include <sys/conf.h>
96#include <sys/file.h>
97#include <sys/callout.h>
98#include <sys/malloc.h>
99#include <sys/mbuf.h>
100#include <sys/msgbuf.h>
101#include <sys/mount.h>
102#include <sys/vnode.h>
103#include <sys/device.h>
104#include <sys/extent.h>
105#include <sys/syscallargs.h>
106#ifdef SYSVMSG
107#include <sys/msg.h>
108#endif
109#ifdef SYSVSEM
110#include <sys/sem.h>
111#endif
112#ifdef SYSVSHM
113#include <sys/shm.h>
114#endif
115
116#ifdef KGDB
117#include <sys/kgdb.h>
118#endif
119
120#include <dev/cons.h>
121
122#include <vm/vm.h>
123#include <vm/vm_kern.h>
124#include <vm/vm_page.h>
125
126#include <uvm/uvm_extern.h>
127
128#include <sys/sysctl.h>
129
130#include <machine/cpu.h>
131#include <machine/cpufunc.h>
132#include <machine/psl.h>
133#include <machine/bootinfo.h>
134#include <machine/bus.h>
135#include <sh3/bscreg.h>
136#include <sh3/ccrreg.h>
137#include <sh3/cpgreg.h>
138#include <sh3/intcreg.h>
139#include <sh3/pfcreg.h>
140#include <sh3/wdtreg.h>
141
142#include <sys/termios.h>
143#include "sci.h"
144
145/* the following is used externally (sysctl_hw) */
146char machine[] = MACHINE;		/* cpu "architecture" */
147char machine_arch[] = MACHINE_ARCH;	/* machine_arch = "sh3" */
148
149#ifdef sh3_debug
150int cpu_debug_mode = 1;
151#else
152int cpu_debug_mode = 0;
153#endif
154
155char cpu_model[120];
156
157char bootinfo[BOOTINFO_MAXSIZE];
158
159/*
160 * Declare these as initialized data so we can patch them.
161 */
162int	nswbuf = 0;
163#ifdef	NBUF
164int	nbuf = NBUF;
165#else
166int	nbuf = 0;
167#endif
168#ifdef	BUFPAGES
169int	bufpages = BUFPAGES;
170#else
171int	bufpages = 0;
172#endif
173#ifdef BUFCACHE
174int	bufcache = BUFCACHE;	/* % of RAM to use for buffer cache */
175#else
176int	bufcache = 0;		/* fallback to old algorithm */
177#endif
178
179int	physmem;
180int	dumpmem_low;
181int	dumpmem_high;
182extern int	boothowto;
183int	cpu_class;
184
185paddr_t msgbuf_paddr;
186
187vm_map_t exec_map = NULL;
188vm_map_t mb_map = NULL;
189vm_map_t phys_map = NULL;
190
191extern paddr_t avail_start, avail_end;
192extern u_long atdevbase;
193extern int etext,_start;
194
195#ifdef	SYSCALL_DEBUG
196#define	SCDEBUG_ALL 0x0004
197extern int	scdebug;
198#endif
199
200#define IOM_RAM_END	((paddr_t)IOM_RAM_BEGIN + IOM_RAM_SIZE - 1)
201
202/*
203 * Extent maps to manage I/O and ISA memory hole space.  Allocate
204 * storage for 8 regions in each, initially.  Later, ioport_malloc_safe
205 * will indicate that it's safe to use malloc() to dynamically allocate
206 * region descriptors.
207 *
208 * N.B. At least two regions are _always_ allocated from the iomem
209 * extent map; (0 -> ISA hole) and (end of ISA hole -> end of RAM).
210 *
211 * The extent maps are not static!  Machine-dependent ISA and EISA
212 * routines need access to them for bus address space allocation.
213 */
214static	long iomem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
215struct	extent *ioport_ex;
216struct	extent *iomem_ex;
217static	int ioport_malloc_safe;
218
219void	setup_bootinfo __P((void));
220caddr_t	allocsys __P((caddr_t));
221void	dumpsys __P((void));
222void	identifycpu __P((void));
223void	initSH3 __P((vaddr_t));
224void	InitializeSci  __P((unsigned char));
225void	Send16550 __P((int c));
226void	Init16550 __P((void));
227void	sh3_cache_on __P((void));
228void	LoadAndReset __P((char *osimage));
229void	XLoadAndReset __P((char *osimage));
230void	Sh3Reset __P((void));
231
232#include <dev/ic/comreg.h>
233#include <dev/ic/comvar.h>
234
235void	consinit __P((void));
236
237#ifdef COMPAT_NOMID
238static int exec_nomid	__P((struct proc *, struct exec_package *));
239#endif
240
241
242
/*
 * Machine-dependent startup code
 *
 * This is called from main() in kern/main.c.
 *
 * Sizes and allocates the kernel data structures (via allocsys()),
 * wires down the buffer cache pages, creates the exec/physio/mbuf
 * submaps, threads the callout free list, and finishes proc0's pcb
 * setup.  No return value; panics on any allocation failure.
 */
void
cpu_startup()
{
	unsigned i;
	caddr_t v;
	int sz;
	int base, residual;
	vaddr_t minaddr, maxaddr;
	vsize_t size;
	struct pcb *pcb;
	/* int x; */

	printf(version);

	sprintf(cpu_model, "Hitachi SH3");

	printf("real mem  = %d\n", ctob(physmem));

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 * allocsys(0) is a dry run that only computes the size.
	 */
	sz = (int)allocsys((caddr_t)0);
	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	/* The second pass must consume exactly the bytes the first predicted. */
	if (allocsys(v) - v != sz)
		panic("startup: table size inconsistency");

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 * Reserve MAXBSIZE of VA per buffer; only part of it gets backed
	 * by physical pages below.
	 */
	size = MAXBSIZE * nbuf;
	buffers = 0;
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
		    NULL, UVM_UNKNOWN_OFFSET,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
				UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
		panic("cpu_startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;

	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		/* don't want to alloc more physical mem than needed */
		bufpages = btoc(MAXBSIZE) * nbuf;
	}

	/* Distribute bufpages over nbuf buffers as evenly as possible. */
	base = bufpages / nbuf;
	residual = bufpages % nbuf;

	for (i = 0; i < nbuf; i++) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
		curbufsize = CLBYTES * ((i < residual) ? (base+1) : base);

		while (curbufsize) {
			/*
			 * Attempt to allocate buffers from the first
			 * 16M of RAM to avoid bouncing file system
			 * transfers.
			 */
			pg = uvm_pagealloc(NULL, 0, NULL, 0);
			if (pg == NULL)
				panic("cpu_startup: not enough memory for "
				    "buffer cache");
#if defined(PMAP_NEW)
			pmap_kenter_pgs(curbuf, &pg, 1);
#else
			pmap_enter(kernel_map->pmap, curbuf,
				   VM_PAGE_TO_PHYS(pg), VM_PROT_ALL, TRUE,
				   VM_PROT_ALL);
#endif
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   16*NCARGS, TRUE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
				   VM_PHYS_SIZE, TRUE, FALSE, NULL);

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_MBUF_SIZE, FALSE, FALSE, NULL);

	/*
	 * Initialize callouts: link all entries into a singly linked
	 * free list headed by callfree.
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];

	printf("avail mem = %ld\n", ptoa(uvmexp.free));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();

	/* Safe for i/o port allocation to use malloc now. */
	ioport_malloc_safe = 1;

	/*
	 * Finish proc0 setup: point the kernel stack pointer (r15) at
	 * the top of proc0's u-area (minus a 16-byte pad) and place the
	 * trapframe immediately below it.
	 */
	curpcb = pcb = &proc0.p_addr->u_pcb;
	pcb->r15 = (int)proc0.p_addr + USPACE - 16;

	proc0.p_md.md_regs = (struct trapframe *)pcb->r15 - 1;

#ifdef SYSCALL_DEBUG
	scdebug |= SCDEBUG_ALL;
#endif

#if 0
	boothowto |= RB_SINGLE;
#endif
}
384
/*
 * Allocate space for system data structures.  We are given
 * a starting virtual address and we return a final virtual
 * address; along the way we set each data structure pointer.
 *
 * We call allocsys() with 0 to find out how much space we want,
 * allocate that much and fill it with zeroes, and then call
 * allocsys() again with the correct base virtual address.
 *
 * Note: must be deterministic across the two passes (see the size
 * consistency check in cpu_startup()); it also computes the bufpages/
 * nbuf/nswbuf tunables on first use (they may be patched or set via
 * kernel options before this runs).
 */
caddr_t
allocsys(v)
	caddr_t v;
{

/* Carve "num" objects of "type" off the arena and advance v. */
#define	valloc(name, type, num) \
	    v = (caddr_t)(((name) = (type *)v) + (num))
#ifdef REAL_CLISTS
	valloc(cfree, struct cblock, nclist);
#endif
	valloc(callout, struct callout, ncallout);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif
#ifdef SYSVSEM
	valloc(sema, struct semid_ds, seminfo.semmni);
	valloc(sem, struct sem, seminfo.semmns);
	/* This is pretty disgusting! */
	valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
#endif
#ifdef SYSVMSG
	valloc(msgpool, char, msginfo.msgmax);
	valloc(msgmaps, struct msgmap, msginfo.msgseg);
	valloc(msghdrs, struct msg, msginfo.msgtql);
	valloc(msqids, struct msqid_ds, msginfo.msgmni);
#endif

	/*
	 * If necessary, determine the number of pages to use for the
	 * buffer cache.  We allocate 1/2 as many swap buffer headers
	 * as file I/O buffers.
	 */
	if (bufpages == 0) {
		if (bufcache == 0) {		/* use old algorithm */
			/*
			 * Determine how many buffers to allocate. We use 10%
			 * of the first 2MB of memory, and 5% of the rest, with
			 * a minimum of 16 buffers.
			 */
			if (physmem < btoc(2 * 1024 * 1024))
				bufpages = physmem / (10 * CLSIZE);
			else
				bufpages = (btoc(2 * 1024 * 1024) + physmem) /
				    (20 * CLSIZE);
		} else {
			/*
			 * Set size of buffer cache to physmem/bufcache * 100
			 * (i.e., bufcache % of physmem).
			 */
			if (bufcache < 5 || bufcache > 95) {
				printf(
		"warning: unable to set bufcache to %d%% of RAM, using 10%%",
				    bufcache);
				bufcache = 10;
			}
			bufpages= physmem / (CLSIZE * 100) * bufcache;
		}
	}
	if (nbuf == 0) {
		nbuf = bufpages;
		if (nbuf < 16)
			nbuf = 16;
	}

	/*
	 * XXX stopgap measure to prevent wasting too much KVM on
	 * the sparsely filled buffer cache.
	 */
	if (nbuf * MAXBSIZE > VM_MAX_KERNEL_BUF)
		nbuf = VM_MAX_KERNEL_BUF / MAXBSIZE;

	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
	valloc(buf, struct buf, nbuf);
	return v;
}
473
474/*
475 * Info for CTL_HW
476 */
477extern	char version[];
478
479
480#define CPUDEBUG
481
482
/*
 * machine dependent system variables.
 *
 * Handles the CTL_MACHDEP sysctl subtree.  All names at this level
 * are terminal.  Returns 0 on success, ENOTDIR for a non-terminal
 * lookup, EOPNOTSUPP for unknown nodes, or an error from the
 * sysctl_* helpers.
 */
int
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	dev_t consdev;
	struct btinfo_bootpath *bibp;
	struct trapframe *tf;
	char *osimage;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case CPU_CONSDEV:
		/* Report the console device, or NODEV if none attached. */
		if (cn_tab != NULL)
			consdev = cn_tab->cn_dev;
		else
			consdev = NODEV;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
		    sizeof consdev));

	case CPU_NKPDE:
		return (sysctl_rdint(oldp, oldlenp, newp, nkpde));

	case CPU_BOOTED_KERNEL:
		/* Path of the booted kernel, from the bootloader info. */
	        bibp = lookup_bootinfo(BTINFO_BOOTPATH);
	        if (!bibp)
			return (ENOENT); /* ??? */
		return (sysctl_rdstring(oldp, oldlenp, newp, bibp->bootpath));

	case CPU_SETPRIVPROC:
		if (newp == NULL)
			return (0);

		/* set current process to privileged process */
		tf = p->p_md.md_regs;
		tf->tf_ssr |= PSL_MD;
		return (0);

	case CPU_DEBUGMODE:
		/* Read/write access to the cpu_debug_mode flag. */
		return (sysctl_int(oldp, oldlenp, newp, newlen,
				   &cpu_debug_mode));

	case CPU_LOADANDRESET:
		/*
		 * newp holds a user-supplied pointer to an OS image;
		 * LoadAndReset() boots it and does not return.
		 */
		if (newp != NULL) {
			osimage = (char *)(*(u_long *)newp);

			LoadAndReset(osimage);
			/* not reach here */
		}
		return (0);

	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
550
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 *
 * Builds a struct sigframe (handler args + saved register context)
 * on either the signal stack or the user stack, copies it out, and
 * redirects the trapframe so the process resumes in the signal
 * trampoline (ps_sigcode) with r15 pointing at the frame.
 */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct proc *p = curproc;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp = p->p_sigacts;
	int onstack;

	tf = p->p_md.md_regs;

	/*
	 * Do we need to jump onto the signal stack?  Only if an
	 * alternate stack is enabled, not already in use, and this
	 * signal requested SA_ONSTACK.
	 */
	onstack =
	    (psp->ps_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (psp->ps_sigact[sig].sa_flags & SA_ONSTACK) != 0;

	/* Allocate space for the signal handler context. */
	if (onstack)
		fp = (struct sigframe *)((caddr_t)psp->ps_sigstk.ss_sp +
						  psp->ps_sigstk.ss_size);
	else
		fp = (struct sigframe *)tf->tf_r15;
	fp--;

	/* Build stack frame for signal trampoline. */
	frame.sf_signum = sig;
	frame.sf_code = code;
	frame.sf_scp = &fp->sf_sc;
	frame.sf_handler = catcher;

	/* Save register context. */
	frame.sf_sc.sc_ssr = tf->tf_ssr;
	frame.sf_sc.sc_spc = tf->tf_spc;
	frame.sf_sc.sc_pr = tf->tf_pr;
	frame.sf_sc.sc_r15 = tf->tf_r15;
	frame.sf_sc.sc_r14 = tf->tf_r14;
	frame.sf_sc.sc_r13 = tf->tf_r13;
	frame.sf_sc.sc_r12 = tf->tf_r12;
	frame.sf_sc.sc_r11 = tf->tf_r11;
	frame.sf_sc.sc_r10 = tf->tf_r10;
	frame.sf_sc.sc_r9 = tf->tf_r9;
	frame.sf_sc.sc_r8 = tf->tf_r8;
	frame.sf_sc.sc_r7 = tf->tf_r7;
	frame.sf_sc.sc_r6 = tf->tf_r6;
	frame.sf_sc.sc_r5 = tf->tf_r5;
	frame.sf_sc.sc_r4 = tf->tf_r4;
	frame.sf_sc.sc_r3 = tf->tf_r3;
	frame.sf_sc.sc_r2 = tf->tf_r2;
	frame.sf_sc.sc_r1 = tf->tf_r1;
	frame.sf_sc.sc_r0 = tf->tf_r0;
	frame.sf_sc.sc_trapno = tf->tf_trapno;
#ifdef TODO
	frame.sf_sc.sc_err = tf->tf_err;
#endif

	/* Save signal stack. */
	frame.sf_sc.sc_onstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;

	/* Save signal mask. */
	frame.sf_sc.sc_mask = *mask;

#ifdef COMPAT_13
	/*
	 * XXX We always have to save an old style signal mask because
	 * XXX we might be delivering a signal to a process which will
	 * XXX escape from the signal in a non-standard way and invoke
	 * XXX sigreturn() directly.
	 */
	native_sigset_to_sigset13(mask, &frame.sf_sc.__sc_mask13);
#endif

	if (copyout(&frame, fp, sizeof(frame)) != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Build context to run handler in.
	 */
	tf->tf_spc = (int)psp->ps_sigcode;
#ifdef TODO
	tf->tf_ssr &= ~(PSL_T|PSL_VM|PSL_AC);
#endif
	tf->tf_r15 = (int)fp;

	/* Remember that we're now on the signal stack. */
	if (onstack)
		psp->ps_sigstk.ss_flags |= SS_ONSTACK;
}
658
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 *
 * Returns EFAULT if the user context cannot be copied in, otherwise
 * EJUSTRETURN (the trapframe itself carries the restored registers).
 */
int
sys___sigreturn14(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys___sigreturn14_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct sigcontext *scp, context;
	struct trapframe *tf;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sigcntxp);
	if (copyin((caddr_t)scp, &context, sizeof(*scp)) != 0)
		return (EFAULT);

	/* Restore signal context. */
	tf = p->p_md.md_regs;
	{
		/*
		 * Check for security violations.  If we're returning to
		 * protected mode, the CPU will validate the segment registers
		 * automatically and generate a trap on violations.  We handle
		 * the trap, rather than doing all of the checking here.
		 *
		 * NOTE(review): with the check below disabled, sc_ssr is
		 * restored unvalidated -- confirm this is intended.
		 */
#ifdef TODO
	  if (((context.sc_ssr ^ tf->tf_ssr) & PSL_USERSTATIC) != 0) {
	    return (EINVAL);
	  }
#endif

	  tf->tf_ssr = context.sc_ssr;
	}
	/* Restore the general-purpose registers saved by sendsig(). */
	tf->tf_r0 = context.sc_r0;
	tf->tf_r1 = context.sc_r1;
	tf->tf_r2 = context.sc_r2;
	tf->tf_r3 = context.sc_r3;
	tf->tf_r4 = context.sc_r4;
	tf->tf_r5 = context.sc_r5;
	tf->tf_r6 = context.sc_r6;
	tf->tf_r7 = context.sc_r7;
	tf->tf_r8 = context.sc_r8;
	tf->tf_r9 = context.sc_r9;
	tf->tf_r10 = context.sc_r10;
	tf->tf_r11 = context.sc_r11;
	tf->tf_r12 = context.sc_r12;
	tf->tf_r13 = context.sc_r13;
	tf->tf_r14 = context.sc_r14;
	tf->tf_spc = context.sc_spc;
	tf->tf_r15 = context.sc_r15;
	tf->tf_pr = context.sc_pr;

	/* Restore signal stack. */
	if (context.sc_onstack & SS_ONSTACK)
		p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK;
	/* Restore signal mask. */
	(void) sigprocmask1(p, SIG_SETMASK, &context.sc_mask, 0);

	return (EJUSTRETURN);
}
736
737int	waittime = -1;
738struct pcb dumppcb;
739
/*
 * Halt or reboot the machine.
 *
 * howto is a mask of RB_* flags (sys/reboot.h): syncs file systems
 * unless RB_NOSYNC, dumps core if RB_DUMP (and not halting), runs
 * shutdown hooks, then either waits for a key press (RB_HALT) or
 * resets the CPU.  Never returns.
 */
void
cpu_reboot(howto, bootstr)
	int howto;
	char *bootstr;
{
	extern int cold;

	/* If we haven't finished booting, don't try to sync or dump. */
	if (cold) {
		howto |= RB_HALT;
		goto haltsys;
	}

	boothowto = howto;
	/* waittime guards against recursive syncs if we panic in here. */
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;
		vfs_shutdown();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		/* resettodr(); */
	}

	/* Disable interrupts. */
	splhigh();

	/* Do a dump if requested. */
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
		dumpsys();

haltsys:
	doshutdownhooks();

	if (howto & RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
	}

	printf("rebooting...\n");
	cpu_reset();
	for(;;)
		;
	/*NOTREACHED*/
}
786
787/*
788 * These variables are needed by /sbin/savecore
789 */
790u_long	dumpmag = 0x8fca0101;	/* magic number */
791int 	dumpsize = 0;		/* pages */
792long	dumplo = 0; 		/* blocks */
793
/*
 * This is called by main to set dumplo and dumpsize.
 * Dumps always skip the first CLBYTES of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 *
 * NOTE: the entire body is currently compiled out under #ifdef TODO;
 * on this port the function is a no-op until crash dumps are ported.
 */
void
cpu_dumpconf()
{
#ifdef	TODO
	int nblks;	/* size of dump area */
	int maj;

	if (dumpdev == NODEV)
		return;
	maj = major(dumpdev);
	if (maj < 0 || maj >= nblkdev)
		panic("dumpconf: bad dumpdev=0x%x", dumpdev);
	if (bdevsw[maj].d_psize == NULL)
		return;
	nblks = (*bdevsw[maj].d_psize)(dumpdev);
	if (nblks <= ctod(1))
		return;

	dumpsize = btoc(IOM_END + ctob(dumpmem_high));

	/* Always skip the first CLBYTES, in case there is a label there. */
	if (dumplo < ctod(1))
		dumplo = ctod(1);

	/* Put dump at end of partition, and make it fit. */
	if (dumpsize > dtoc(nblks - dumplo))
		dumpsize = dtoc(nblks - dumplo);
	if (dumplo < nblks - ctod(dumpsize))
		dumplo = nblks - ctod(dumpsize);
#endif
}
832
833/*
834 * Doadump comes here after turning off memory management and
835 * getting on the dump stack, either when called above, or by
836 * the auto-restart code.
837 */
838#define BYTES_PER_DUMP  NBPG	/* must be a multiple of pagesize XXX small */
839static vaddr_t dumpspace;
840
841vaddr_t
842reserve_dumppages(p)
843	vaddr_t p;
844{
845
846	dumpspace = p;
847	return (p + BYTES_PER_DUMP);
848}
849
/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.
 *
 * Writes physical memory to the configured dump device in
 * BYTES_PER_DUMP chunks via the device's d_dump entry point.
 * NOTE: the entire body is under #ifdef TODO, so on this port the
 * function currently does nothing.
 */
void
dumpsys()
{
#ifdef	TODO
	unsigned bytes, i, n;
	int maddr, psize;
	daddr_t blkno;
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	int error;

	/* Save registers. */
	savectx(&dumppcb);

	msgbufmapped = 0;	/* don't record dump msgs in msgbuf */
	if (dumpdev == NODEV)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo < 0)
		return;
	printf("\ndumping to dev %x, offset %ld\n", dumpdev, dumplo);

	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}

#if 0	/* XXX this doesn't work.  grr. */
        /* toss any characters present prior to dump */
	while (sget() != NULL); /*syscons and pccons differ */
#endif

	bytes = ctob(dumpmem_high) + IOM_END;
	maddr = 0;
	blkno = dumplo;
	dump = bdevsw[major(dumpdev)].d_dump;
	error = 0;
	for (i = 0; i < bytes; i += n) {
		/*
		 * Avoid dumping the ISA memory hole, and areas that
		 * BIOS claims aren't in low memory.
		 */
		if (i >= ctob(dumpmem_low) && i < IOM_END) {
			n = IOM_END - i;
			maddr += n;
			blkno += btodb(n);
			continue;
		}

		/* Print out how many MBs we to go. */
		n = bytes - i;
		if (n && (n % (1024*1024)) == 0)
			printf("%d ", n / (1024 * 1024));

		/* Limit size for next transfer. */
		if (n > BYTES_PER_DUMP)
			n =  BYTES_PER_DUMP;

		/* Map the physical chunk at dumpspace, then write it out. */
		(void) pmap_map(dumpspace, maddr, maddr + n, VM_PROT_READ);
		error = (*dump)(dumpdev, blkno, (caddr_t)dumpspace, n);
		if (error)
			break;
		maddr += n;
		blkno += btodb(n);			/* XXX? */

#if 0	/* XXX this doesn't work.  grr. */
		/* operator aborting dump? */
		if (sget() != NULL) {
			error = EINTR;
			break;
		}
#endif
	}

	/* Report the outcome of the dump to the console. */
	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case EINTR:
		printf("aborted from console\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
	printf("\n\n");
	delay(5000000);		/* 5 seconds */
#endif	/* TODO */
}
965
/*
 * Clear registers on exec
 *
 * Resets the process's trapframe for a freshly exec'd image: zeroes
 * the general registers, loads r4/r5/r6 with argc/argv/envp taken
 * from the new user stack, and points spc at the image entry point
 * with a fresh user-mode status register.
 */
void
setregs(p, pack, stack)
	struct proc *p;
	struct exec_package *pack;
	u_long stack;
{
	register struct pcb *pcb = &p->p_addr->u_pcb;
	register struct trapframe *tf;

	/* Forget any FPU state from the previous image. */
	p->p_md.md_flags &= ~MDP_USEDFPU;
	pcb->pcb_flags = 0;

	tf = p->p_md.md_regs;

	tf->tf_r0 = 0;
	tf->tf_r1 = 0;
	tf->tf_r2 = 0;
	tf->tf_r3 = 0;
	/*
	 * SH3 calling convention passes the first args in r4-r6.
	 * The stack layout here is assumed to be: argc word, then
	 * argc argv pointers, a NULL, then the envp pointers.
	 */
	tf->tf_r4 = *(int *)stack;	/* argc */
	tf->tf_r5 = stack+4;		/* argv */
	tf->tf_r6 = stack+4*tf->tf_r4 + 8; /* envp */
	tf->tf_r7 = 0;
	tf->tf_r8 = 0;
	tf->tf_r9 = 0;
	tf->tf_r10 = 0;
	tf->tf_r11 = 0;
	tf->tf_r12 = 0;
	tf->tf_r13 = 0;
	tf->tf_r14 = 0;
	tf->tf_spc = pack->ep_entry;	/* resume at the image entry point */
	tf->tf_ssr = PSL_USERSET;	/* user-mode status register */
	tf->tf_r15 = stack;		/* user stack pointer */
#ifdef TODO
	tf->tf_ebx = (int)PS_STRINGS;
#endif
}
1005
1006/*
1007 * Initialize segments and descriptor tables
1008 */
1009
1010extern  struct user *proc0paddr;
1011
/*
 * Early machine-dependent initialization, called from locore with
 * the first available physical address.
 *
 * Creates the iomem extent map, enables interrupts, computes a
 * checksum over the kernel text (diagnostic only), reserves the RAM
 * range in the extent map, sets physmem, bootstraps the pmap, sets
 * up the message buffer and boot information.
 */
void
initSH3(first_avail)
	vaddr_t first_avail;
{
	unsigned short *p;
	unsigned short sum;
	int	size;
	extern void consinit __P((void));

	proc0.p_addr = proc0paddr; /* page dir address */

	/*
	 * Initialize the I/O port and I/O mem extent maps.
	 * Note: we don't have to check the return value since
	 * creation of a fixed extent map will never fail (since
	 * descriptor storage has already been allocated).
	 *
	 * N.B. The iomem extent manages _all_ physical addresses
	 * on the machine.  When the amount of RAM is found, the two
	 * extents of RAM are allocated from the map (0 -> ISA hole
	 * and end of ISA hole -> end of RAM).
	 */
	iomem_ex = extent_create("iomem", 0x0, 0xffffffff, M_DEVBUF,
	    (caddr_t)iomem_ex_storage, sizeof(iomem_ex_storage),
	    EX_NOCOALESCE|EX_NOWAIT);

#if 0	/* XXX (msaitoh) */
	consinit();	/* XXX SHOULD NOT BE DONE HERE */
#endif

	splraise(-1);
	enable_intr();

	avail_end = sh3_trunc_page(IOM_RAM_END + 1);

#if 0	/* XXX (msaitoh) */
	printf("initSH3\r\n");
#endif

	/*
	 * Calculate check sum over the kernel text (_start .. etext),
	 * summed as 16-bit words.  Result is only printed when the
	 * debug printf below is enabled.
	 */
	size = (char *)&etext - (char *)&_start;
	p = (unsigned short *)&_start;
	sum = 0;
	size >>= 1;
	while (size--)
		sum += *p++;
#if 0
	printf("Check Sum = 0x%x", sum);
#endif

	/*
	 * Allocate the physical addresses used by RAM from the iomem
	 * extent map.  This is done before the addresses are
	 * page rounded just to make sure we get them all.
	 */
	if (extent_alloc_region(iomem_ex, IOM_RAM_BEGIN,
				IOM_RAM_SIZE,
				EX_NOWAIT)) {
		/* XXX What should we do? */
#if 1
		printf("WARNING: CAN'T ALLOCATE RAM MEMORY FROM IOMEM EXTENT MAP!\n");
#endif
	}

#if 0 /* avail_start is set in locore.s to first available page rounded
	 physical mem */
	avail_start = IOM_RAM_BEGIN + NBPG;
#endif

	/* number of pages of physmem addr space */
	physmem = btoc(IOM_RAM_SIZE);
#ifdef	TODO
	dumpmem = physmem;
#endif

	/*
	 * Initialize for pmap_free_pages and pmap_next_page.
	 * These guys should be page-aligned.
	 */
	if (physmem < btoc(2 * 1024 * 1024)) {
		printf("warning: too little memory available; "
		       "have %d bytes, want %d bytes\n"
		       "running in degraded mode\n"
		       "press a key to confirm\n\n",
		       ctob(physmem), 2*1024*1024);
		cngetc();
	}

	/* Call pmap initialization to make new kernel address space */
	pmap_bootstrap((vaddr_t)atdevbase);

	/*
	 * Initialize error message buffer (at end of core).
	 */
	initmsgbuf((caddr_t)msgbuf_paddr, round_page(MSGBUFSIZE));

	/*
	 * set boot device information
	 */
	setup_bootinfo();

#if 0
	sh3_cache_on();
#endif

}
1120
/* Minimal doubly-linked circular queue node used by _insque/_remque. */
struct queue {
	struct queue *q_next, *q_prev;
};

/*
 * insert an element into a queue
 *
 * Links the node at v1 into the doubly-linked list immediately
 * after the node at v2.
 */
void
_insque(v1, v2)
	void *v1;
	void *v2;
{
	struct queue *node = v1, *where = v2;
	struct queue *follower;

	follower = where->q_next;
	node->q_next = follower;
	where->q_next = node;
	node->q_prev = where;
	follower->q_prev = node;
}
1142
1143/*
1144 * remove an element from a queue
1145 */
1146void
1147_remque(v)
1148	void *v;
1149{
1150	struct queue *elem = v;
1151	struct queue *next, *prev;
1152
1153	next = elem->q_next;
1154	prev = elem->q_prev;
1155	next->q_prev = prev;
1156	prev->q_next = next;
1157	elem->q_prev = 0;
1158}
1159
#ifdef COMPAT_NOMID
/*
 * Recognize old a.out images that carry no machine-id (386BSD ZMAGIC
 * and BSDI Z/N/OMAGIC variants) and set up the exec vmcmds for them.
 * Returns 0 on success or ENOEXEC if the magic is not recognized.
 */
static int
exec_nomid(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
	int error;
	u_long midmag, magic;
	u_short mid;
	struct exec *execp = epp->ep_hdr;

	/* check on validity of epp->ep_hdr performed by exec_out_makecmds */

	/* Split the midmag word into machine-id and magic number. */
	midmag = ntohl(execp->a_midmag);
	mid = (midmag >> 16) & 0xffff;
	magic = midmag & 0xffff;

	/* No mid at all: take the raw (host-order) magic with MID_ZERO. */
	if (magic == 0) {
		magic = (execp->a_midmag & 0xffff);
		mid = MID_ZERO;
	}

	midmag = mid << 16 | magic;

	switch (midmag) {
	case (MID_ZERO << 16) | ZMAGIC:
		/*
		 * 386BSD's ZMAGIC format:
		 */
		error = exec_aout_prep_oldzmagic(p, epp);
		break;

	case (MID_ZERO << 16) | QMAGIC:
		/*
		 * BSDI's QMAGIC format:
		 * same as new ZMAGIC format, but with different magic number
		 */
		error = exec_aout_prep_zmagic(p, epp);
		break;

	case (MID_ZERO << 16) | NMAGIC:
		/*
		 * BSDI's NMAGIC format:
		 * same as NMAGIC format, but with different magic number
		 * and with text starting at 0.
		 */
		error = exec_aout_prep_oldnmagic(p, epp);
		break;

	case (MID_ZERO << 16) | OMAGIC:
		/*
		 * BSDI's OMAGIC format:
		 * same as OMAGIC format, but with different magic number
		 * and with text starting at 0.
		 */
		error = exec_aout_prep_oldomagic(p, epp);
		break;

	default:
		error = ENOEXEC;
	}

	return error;
}
#endif
1225
1226/*
1227 * cpu_exec_aout_makecmds():
1228 *	cpu-dependent a.out format hook for execve().
1229 *
1230 * Determine of the given exec package refers to something which we
1231 * understand and, if so, set up the vmcmds for it.
1232 *
1233 * On the i386, old (386bsd) ZMAGIC binaries and BSDI QMAGIC binaries
1234 * if COMPAT_NOMID is given as a kernel option.
1235 */
1236int
1237cpu_exec_aout_makecmds(p, epp)
1238	struct proc *p;
1239	struct exec_package *epp;
1240{
1241	int error = ENOEXEC;
1242
1243#ifdef COMPAT_NOMID
1244	if ((error = exec_nomid(p, epp)) == 0)
1245		return error;
1246#endif /* ! COMPAT_NOMID */
1247
1248	return error;
1249}
1250
1251void
1252setup_bootinfo(void)
1253{
1254	struct btinfo_bootdisk *help;
1255
1256	*(int *)bootinfo = 1;
1257	help = (struct btinfo_bootdisk *)(bootinfo + sizeof(int));
1258	help->biosdev = 0;
1259	help->partition = 0;
1260	((struct btinfo_common *)help)->len = sizeof(struct btinfo_bootdisk);
1261	((struct btinfo_common *)help)->type = BTINFO_BOOTDISK;
1262}
1263
1264void *
1265lookup_bootinfo(type)
1266	int type;
1267{
1268	struct btinfo_common *help;
1269	int n = *(int*)bootinfo;
1270	help = (struct btinfo_common *)(bootinfo + sizeof(int));
1271	while (n--) {
1272		if (help->type == type)
1273			return (help);
1274		help = (struct btinfo_common *)((char*)help + help->len);
1275	}
1276	return (0);
1277}
1278
1279
1280/*
1281 * consinit:
1282 * initialize the system console.
1283 * XXX - shouldn't deal with this initted thing, but then,
1284 * it shouldn't be called from init386 either.
1285 */
1286void
1287consinit()
1288{
1289	static int initted;
1290
1291	if (initted)
1292		return;
1293	initted = 1;
1294
1295	cninit();
1296
1297#ifdef DDB
1298	ddb_init();
1299#endif
1300}
1301
/*
 * cpu_reset:
 *	Reset the machine; never returns.  Interrupts are disabled
 *	first, and we spin in case the reset does not take effect
 *	instantaneously.
 */
void
cpu_reset()
{

	disable_intr();

	Sh3Reset();
	while (1)
		continue;
}
1312
1313int
1314bus_space_map (t, addr, size, flags, bshp)
1315	bus_space_tag_t t;
1316	bus_addr_t addr;
1317	bus_size_t size;
1318	int flags;
1319	bus_space_handle_t *bshp;
1320{
1321
1322	*bshp = (bus_space_handle_t)addr;
1323
1324	return 0;
1325}
1326
1327int
1328sh_memio_subregion(t, bsh, offset, size, nbshp)
1329	bus_space_tag_t t;
1330	bus_space_handle_t bsh;
1331	bus_size_t offset, size;
1332	bus_space_handle_t *nbshp;
1333{
1334
1335	*nbshp = bsh + offset;
1336	return (0);
1337}
1338
1339int
1340sh_memio_alloc(t, rstart, rend, size, alignment, boundary, flags,
1341	       bpap, bshp)
1342	bus_space_tag_t t;
1343	bus_addr_t rstart, rend;
1344	bus_size_t size, alignment, boundary;
1345	int flags;
1346	bus_addr_t *bpap;
1347	bus_space_handle_t *bshp;
1348{
1349	*bshp = *bpap = rstart;
1350
1351	return (0);
1352}
1353
/*
 * sh_memio_free:
 *	Release a region obtained with sh_memio_alloc.  Allocation on
 *	this port keeps no state (the handle is the address itself),
 *	so there is nothing to undo; all arguments are ignored.
 */
void
sh_memio_free(t, bsh, size)
	bus_space_tag_t t;
	bus_space_handle_t bsh;
	bus_size_t size;
{

}
1362
/*
 * sh_memio_unmap:
 *	Undo a bus_space_map.  Since mapping on this port is a no-op
 *	(handles are plain addresses), unmapping is a no-op too; all
 *	arguments are ignored.
 */
void
sh_memio_unmap(t, bsh, size)
	bus_space_tag_t t;
	bus_space_handle_t bsh;
	bus_size_t size;
{
	return;
}
1371
1372/*
1373 * InitializeBsc
1374 * : BSC(Bus State Controler)
1375 */
1376void InitializeBsc __P((void));
1377
1378void
1379InitializeBsc()
1380{
1381
1382	/*
1383	 * Drive RAS,CAS in stand by mode and bus release mode
1384	 * Area0 = Normal memory, Area5,6=Normal(no burst)
1385	 * Area2 = Normal memory, Area3 = SDRAM, Area5 = Normal memory
1386	 * Area4 = Normal Memory
1387	 * Area6 = Normal memory
1388	 */
1389	SHREG_BSC.BCR1.WORD = BSC_BCR1_VAL;
1390
1391	/*
1392	 * Bus Width
1393	 * Area4: Bus width = 16bit
1394	 * Area6,5 = 16bit
1395	 * Area1 = 8bit
1396	 * Area2,3: Bus width = 32bit
1397	 */
1398	 SHREG_BSC.BCR2.WORD = BSC_BCR2_VAL;
1399
1400	/*
1401	 * Idle cycle number in transition area and read to write
1402	 * Area6 = 3, Area5 = 3, Area4 = 3, Area3 = 3, Area2 = 3
1403	 * Area1 = 3, Area0 = 3
1404	 */
1405	SHREG_BSC.WCR1.WORD = BSC_WCR1_VAL;
1406
1407	/*
1408	 * Wait cycle
1409	 * Area 6 = 6
1410	 * Area 5 = 2
1411	 * Area 4 = 10
1412	 * Area 3 = 3
1413	 * Area 2,1 = 3
1414	 * Area 0 = 6
1415	 */
1416	SHREG_BSC.WCR2.WORD = BSC_WCR2_VAL;
1417
1418#ifdef SH4
1419	SHREG_BSC.WCR3.WORD = BSC_WCR3_VAL;
1420#endif
1421
1422	/*
1423	 * RAS pre-charge = 2cycle, RAS-CAS delay = 3 cycle,
1424	 * write pre-charge=1cycle
1425	 * CAS before RAS refresh RAS assert time = 3 cycle
1426	 * Disable burst, Bus size=32bit, Column Address=10bit, Refresh ON
1427	 * CAS before RAS refresh ON, EDO DRAM
1428	 */
1429	SHREG_BSC.MCR.WORD = BSC_MCR_VAL;
1430
1431#ifdef BSC_SDMR_VAL
1432#if 1
1433#define SDMR	(*(volatile unsigned char  *)BSC_SDMR_VAL)
1434
1435	SDMR = 0;
1436#else
1437#define ADDSET	(*(volatile unsigned short *)0x1A000000)
1438#define ADDRST	(*(volatile unsigned short *)0x18000000)
1439#define SDMR	(*(volatile unsigned char  *)BSC_SDMR_VAL)
1440
1441	ADDSET = 0;
1442	SDMR = 0;
1443	ADDRST = 0;
1444#endif
1445#endif
1446
1447	/*
1448	 * PCMCIA Control Register
1449	 * OE/WE assert delay 3.5 cycle
1450	 * OE/WE negate-address delay 3.5 cycle
1451	 */
1452#ifdef BSC_PCR_VAL
1453	SHREG_BSC.PCR.WORD = 0x00ff;
1454#endif
1455
1456	/*
1457	 * Refresh Timer Control/Status Register
1458	 * Disable interrupt by CMF, closk 1/16, Disable OVF interrupt
1459	 * Count Limit = 1024
1460	 * In following statement, the reason why high byte = 0xa5(a4 in RFCR)
1461	 * is the rule of SH3 in writing these register.
1462	 */
1463	SHREG_BSC.RTCSR.WORD = BSC_RTCSR_VAL;
1464
1465
1466	/*
1467	 * Refresh Timer Counter
1468	 * Initialize to 0
1469	 */
1470	SHREG_BSC.RTCNT = BSC_RTCNT_VAL;
1471
1472	/* set Refresh Time Constant Register */
1473	SHREG_BSC.RTCOR = BSC_RTCOR_VAL;
1474
1475	/* init Refresh Count Register */
1476#ifdef BSC_RFCR_VAL
1477	SHREG_BSC.RFCR = BSC_RFCR_VAL;
1478#endif
1479
1480	/* Set Clock mode (make internal clock double speed) */
1481
1482	SHREG_FRQCR = FRQCR_VAL;
1483
1484#ifndef MMEYE_NO_CACHE
1485	/* Cache ON */
1486	SHREG_CCR = 0x0001;
1487#endif
1488}
1489
/*
 * sh3_cache_on:
 *	Enable the CPU cache via the cache control register: enable,
 *	write the cache-clear (flush) bit, then re-enable.  Compiled
 *	out entirely when MMEYE_NO_CACHE is configured.
 */
void
sh3_cache_on(void)
{
#ifndef MMEYE_NO_CACHE
	/* Cache ON */
	SHREG_CCR = 0x0001;
	SHREG_CCR = 0x0009; /* cache clear */
	SHREG_CCR = 0x0001; /* cache on */
#endif
}
1500
1501#include <machine/mmeye.h>
/*
 * LoadAndReset:
 *	Copy a new kernel image into a fixed staging buffer in RAM,
 *	sum both the source and the copy for a visual consistency
 *	check, then hand control to XLoadAndReset (does not return).
 *
 *	osimage points at the image; its first u_long is read as the
 *	image size in bytes (presumably followed by one more header
 *	word -- see the size computation below; TODO confirm).
 */
void
LoadAndReset(char *osimage)
{
	void *buf_addr;		/* staging area the image is copied to */
	u_long size;		/* byte count, then u_long word count */
	u_long *src;
	u_long *dest;
	u_long csum = 0;	/* sum accumulated while copying */
	u_long csum2 = 0;	/* sum re-read from the copy */
	u_long size2;
#define OSIMAGE_BUF_ADDR 0x8c400000 /* !!!!!! This value depends on physical
				       available memory */


	printf("LoadAndReset:copy start\n");
	buf_addr = (void *)OSIMAGE_BUF_ADDR;

	/* First word of the image is its length in bytes. */
	size = *(u_long *)osimage;
	src = (u_long *)osimage;
	dest = buf_addr;

	/*
	 * Convert to a count of u_long words: add two header words,
	 * round the byte count up, divide by 4.  NOTE(review): the
	 * ">> 2" assumes a 32-bit u_long.
	 */
	size = (size + sizeof(u_long)*2 + 3) >> 2 ;
	size2 = size;

	/* Copy, summing the source words as we go. */
	while (size--){
		csum += *src;
		*dest++ = *src++;
	}

	/*
	 * Re-read and sum the copy.  Both sums are only printed for
	 * the operator; they are not compared programmatically.
	 */
	dest = buf_addr;
	while (size2--)
		csum2 += *dest++;

	printf("LoadAndReset:copy end[%lx,%lx]\n", csum, csum2);
	printf("start XLoadAndReset\n");

	/* mask all external interrupts (XXX) */

	XLoadAndReset(buf_addr);
}
1542