/*	$NetBSD: sun2.c,v 1.8.42.1 2008/05/18 12:32:57 yamt Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Gordon W. Ross and Matthew Fredette.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Standalone functions specific to the Sun2.
 */

/* Need to avoid conflicts on these: */
#define get_pte sun2_get_pte
#define set_pte sun2_set_pte
#define get_segmap sun2_get_segmap
#define set_segmap sun2_set_segmap

/*
 * We need to get the sun2 NBSG definition, even if we're
 * building this with a different sun68k target.
 */
#include <arch/sun2/include/param.h>

#include <sys/param.h>
#include <machine/idprom.h>
#include <machine/mon.h>

#include <arch/sun2/include/pte.h>
#include <arch/sun2/sun2/control.h>
#ifdef notyet
#include <arch/sun3/sun3/vme.h>
#else
#define VME16_BASE MBIO_BASE
#define VME16_MASK MBIO_MASK
#endif
#include <arch/sun2/sun2/mbmem.h>
#include <arch/sun2/sun2/mbio.h>

#include <stand.h>

#include "libsa.h"
#include "dvma.h"
#include "saio.h"	/* enum MAPTYPES */

#define OBIO_MASK 0xFFFFFF

u_int	get_pte(vaddr_t);
void	set_pte(vaddr_t, u_int);
char	*dvma2_alloc(int);
void	dvma2_free(char *, int);
char	*dvma2_mapin(char *, int);
void	dvma2_mapout(char *, int);
char	*dev2_mapin(int, u_long, int);

struct mapinfo {
	int maptype;
	int pgtype;
	u_int base;
	u_int mask;
};

#ifdef notyet
struct mapinfo
sun2_mapinfo[MAP__NTYPES] = {
	/* On-board memory, I/O */
	{ MAP_MAINMEM, PGT_OBMEM, 0, ~0 },
	{ MAP_OBIO, PGT_OBIO, 0, OBIO_MASK },
	/* Multibus memory, I/O */
	{ MAP_MBMEM, PGT_MBMEM, MBMEM_BASE, MBMEM_MASK },
	{ MAP_MBIO, PGT_MBIO, MBIO_BASE, MBIO_MASK },
	/* VME A16 */
	{ MAP_VME16A16D, PGT_VME_D16, VME16_BASE, VME16_MASK },
	{ MAP_VME16A32D, 0, 0, 0 },
	/* VME A24 */
	{ MAP_VME24A16D, 0, 0, 0 },
	{ MAP_VME24A32D, 0, 0, 0 },
	/* VME A32 */
	{ MAP_VME32A16D, 0, 0, 0 },
	{ MAP_VME32A32D, 0, 0, 0 },
};
#endif

/* The virtual address we will use for PROM device mappings. */
int sun2_devmap = SUN3_MONSHORTSEG;

char *
dev2_mapin(int maptype, u_long physaddr, int length)
{
#ifdef notyet
	u_int i, pa, pte, pgva, va;

	if ((sun2_devmap + length) > SUN3_MONSHORTPAGE)
		panic("dev2_mapin: length=%d", length);

	for (i = 0; i < MAP__NTYPES; i++)
		if (sun2_mapinfo[i].maptype == maptype)
			goto found;
	panic("dev2_mapin: bad maptype");
found:

	if (physaddr & ~(sun2_mapinfo[i].mask))
		panic("dev2_mapin: bad address");
	pa = sun2_mapinfo[i].base + physaddr;

	pte = PA_PGNUM(pa) | PG_PERM |
	    sun2_mapinfo[i].pgtype;

	va = pgva = sun2_devmap;
	do {
		set_pte(pgva, pte);
		pgva += NBPG;
		pte += 1;
		length -= NBPG;
	} while (length > 0);
	sun2_devmap = pgva;
	va += (physaddr & PGOFSET);

#ifdef	DEBUG_PROM
	if (debug)
		printf("dev2_mapin: va=0x%x pte=0x%x\n",
		    va, get_pte(va));
#endif
	return ((char *)va);
#else
	panic("dev2_mapin");
	return (NULL);
#endif
}
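
/*
 * Hypothetical usage sketch (not from the original source; dev2_mapin
 * is still under "notyet"): map one page of Multibus I/O registers at
 * physical 0x2000 and get back a virtual address inside the PROM
 * device-map window:
 *
 *	char *regs = dev2_mapin(MAP_MBIO, 0x2000, NBPG);
 */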

/*****************************************************************
 * DVMA support
 */

/*
 * The easiest way to deal with the need for DVMA mappings is to
 * create a DVMA alias mapping of the entire address range used by
 * the boot program.  That way, dvma_mapin can just compute the
 * DVMA alias address, and dvma_mapout does nothing.
 *
 * Note that this assumes that standalone programs will do I/O
 * operations only within the checked range (SA_MIN_VA .. SA_MAX_VA).
 */

#define DVMA_BASE	0x00f00000
#define DVMA_MAPLEN	0x38000	/* 256K - 32K (save MONSHORTSEG) */

#define SA_MIN_VA	0x220000
#define SA_MAX_VA	(SA_MIN_VA + DVMA_MAPLEN)

/* This points to the end of the free DVMA space. */
u_int dvma2_end = DVMA_BASE + DVMA_MAPLEN;
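
/*
 * Illustrative example (derived from the constants above, not part of
 * the original source): a boot-program buffer at local VA 0x240000
 * lies 0x20000 bytes into the checked range, so its DVMA alias is
 * DVMA_BASE + 0x20000 = 0x00f20000.  dvma2_init() below makes the
 * alias work by pointing the DVMA segment map entries at the same
 * PMEGs that already back SA_MIN_VA..SA_MAX_VA.
 */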

void
dvma2_init(void)
{
	int segva, dmava, sme;

	segva = SA_MIN_VA;
	dmava = DVMA_BASE;

	while (segva < SA_MAX_VA) {
		sme = get_segmap(segva);
		set_segmap(dmava, sme);
		segva += NBSG;
		dmava += NBSG;
	}
}

/* Convert a local address to a DVMA address. */
char *
dvma2_mapin(char *addr, int len)
{
	int va = (int)addr;

	/* Make sure the address is in the DVMA map. */
	if ((va < SA_MIN_VA) || (va >= SA_MAX_VA))
		panic("dvma2_mapin: 0x%x outside 0x%x..0x%x",
		    va, SA_MIN_VA, SA_MAX_VA);

	va -= SA_MIN_VA;
	va += DVMA_BASE;

	return ((char *)va);
}

/* Destroy a DVMA address alias. */
void
dvma2_mapout(char *addr, int len)
{
	int va = (int)addr;

	/* Make sure the address is in the DVMA map. */
	if ((va < DVMA_BASE) || (va >= (DVMA_BASE + DVMA_MAPLEN)))
		panic("dvma2_mapout");
}

char *
dvma2_alloc(int len)
{
	len = m68k_round_page(len);
	dvma2_end -= len;
	return ((char *)dvma2_end);
}

void
dvma2_free(char *dvma, int len)
{
	/* not worth the trouble */
}
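
/*
 * Hypothetical caller's view of the routines above (illustrative, not
 * from the original source; alloc() is the libsa allocator).  A driver
 * hands the device the DVMA alias of its local buffer; the mapout is
 * only a range check:
 *
 *	char *buf = alloc(512);
 *	char *dma = dvma2_mapin(buf, 512);
 *	... program the device with (u_long)dma ...
 *	dvma2_mapout(dma, 512);
 */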

/*****************************************************************
 * Control space stuff...
 */

u_int
get_pte(vaddr_t va)
{
	u_int pte;

	pte = get_control_word(CONTROL_ADDR_BUILD(PGMAP_BASE, va));
	if (pte & PG_VALID) {
		/*
		 * This clears bit 30 (the kernel readable bit, which
		 * should always be set), bit 28 (which should always
		 * be set) and bit 26 (the user writable bit, which we
		 * always have tracking the kernel writable bit).  In
		 * the protection, this leaves bit 29 (the kernel
		 * writable bit) and bit 27 (the user readable bit).
		 * See pte2.h for more about this hack.
		 */
		pte &= ~(0x54000000);
		/*
		 * Flip bit 27 (the user readable bit) to become bit
		 * 27 (the PG_SYSTEM bit).
		 */
		pte ^= (PG_SYSTEM);
	}
	return (pte);
}

void
set_pte(vaddr_t va, u_int pte)
{
	if (pte & PG_VALID) {
		/* Clear bit 26 (the user writable bit). */
		pte &= (~0x04000000);
		/*
		 * Flip bit 27 (the PG_SYSTEM bit) to become bit 27
		 * (the user readable bit).
		 */
		pte ^= (PG_SYSTEM);
		/*
		 * Always set bits 30 (the kernel readable bit) and
		 * bit 28, and set bit 26 (the user writable bit) iff
		 * bit 29 (the kernel writable bit) is set *and* bit
		 * 27 (the user readable bit) is set.  This latter bit
		 * of logic is expressed in the bizarre second term
		 * below, chosen because it needs no branches.
		 */
#if (PG_WRITE >> 2) != PG_SYSTEM
#error "PG_WRITE and PG_SYSTEM definitions don't match!"
#endif
		pte |= 0x50000000
		    | ((((pte & PG_WRITE) >> 2) & pte) >> 1);
	}
	set_control_word(CONTROL_ADDR_BUILD(PGMAP_BASE, va), pte);
}
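
/*
 * Worked example of the hack above (added for illustration; bit
 * numbers are those named in the comments): a valid, kernel-writable,
 * kernel-only software PTE has PG_VALID (bit 31), PG_WRITE (bit 29)
 * and PG_SYSTEM (bit 27) set.  In set_pte(), the XOR clears bit 27
 * (not user readable), so the branchless term contributes only bits
 * 30 and 28, leaving bit 26 (user writable) clear.  get_pte() masks
 * bits 30, 28 and 26 back off and flips bit 27 again, restoring the
 * original software PTE.
 */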

int
get_segmap(vaddr_t va)
{
	va = CONTROL_ADDR_BUILD(SEGMAP_BASE, va);
	return (get_control_byte(va));
}

void
set_segmap(vaddr_t va, int sme)
{
	va = CONTROL_ADDR_BUILD(SEGMAP_BASE, va);
	set_control_byte(va, sme);
}

/*
 * Copy the IDPROM contents into the passed buffer.
 * The caller (idprom.c) will do the checksum.
 */
void
sun2_getidprom(u_char *dst)
{
	vaddr_t src;	/* control space address */
	int len, x;

	src = IDPROM_BASE;
	len = sizeof(struct idprom);
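	/*
	 * The IDPROM is presumably decoded at one byte per page in
	 * control space, hence the NBPG stride in the loop below.
	 */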
	do {
		x = get_control_byte(src);
		src += NBPG;
		*dst++ = x;
	} while (--len > 0);
}

/*****************************************************************
 * Init our function pointers, etc.
 */

/*
 * For booting, the PROM in fredette's Sun 2/120 doesn't map
 * much main memory, and what is mapped is mapped strangely.
 * Low virtual memory is mapped like:
 *
 * 0x000000 - 0x0bffff virtual -> 0x000000 - 0x0bffff physical
 * 0x0c0000 - 0x0fffff virtual -> invalid
 * 0x100000 - 0x13ffff virtual -> 0x0c0000 - 0x0fffff physical
 * 0x200800 - 0x3fffff virtual -> 0x200800 - 0x3fffff physical
 *
 * I think the SunOS authors wanted to load kernels starting at
 * physical zero, and assumed that kernels would be less
 * than 768K (0x0c0000) long.  Also, the PROM maps physical
 * 0x0c0000 - 0x0fffff into DVMA space, so we can't take the
 * easy road and just add more mappings to use that physical
 * memory while loading (the PROM might do DMA there).
 *
 * What we do, then, is assume a 4MB machine (you'll really
 * need that to run NetBSD at all anyway), and we map two
 * chunks of physical and virtual space:
 *
 * 0x400000 - 0x4bffff virtual -> 0x000000 - 0x0bffff physical
 * 0x4c0000 - 0x5fffff virtual -> 0x2c0000 - 0x3fffff physical
 *
 * And then we load starting at virtual 0x400000.  We will do
 * all of this mapping just by copying PMEGs.
 *
 * After the load is done, but before we enter the kernel, we're
 * done with the PROM, so we copy the part of the kernel that
 * got loaded at physical 0x2c0000 down to physical 0x0c0000.
 * This can't just be a PMEG copy; we've actually got to move
 * bytes in physical memory.
 *
 * These two chunks of physical and virtual space are defined
 * in macros below.  Some of the macros are only for completeness:
 */
#define MEM_CHUNK0_SIZE			(0x0c0000)
#define MEM_CHUNK0_LOAD_PHYS		(0x000000)
#define MEM_CHUNK0_LOAD_VIRT		(0x400000)
#define MEM_CHUNK0_LOAD_VIRT_PROM	MEM_CHUNK0_LOAD_PHYS
#define MEM_CHUNK0_COPY_PHYS		MEM_CHUNK0_LOAD_PHYS
#define MEM_CHUNK0_COPY_VIRT		MEM_CHUNK0_COPY_PHYS

#define MEM_CHUNK1_SIZE			(0x140000)
#define MEM_CHUNK1_LOAD_PHYS		(0x2c0000)
#define MEM_CHUNK1_LOAD_VIRT		(MEM_CHUNK0_LOAD_VIRT + MEM_CHUNK0_SIZE)
#define MEM_CHUNK1_LOAD_VIRT_PROM	MEM_CHUNK1_LOAD_PHYS
#define MEM_CHUNK1_COPY_PHYS		(MEM_CHUNK0_LOAD_PHYS + MEM_CHUNK0_SIZE)
#define MEM_CHUNK1_COPY_VIRT		MEM_CHUNK1_COPY_PHYS
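
/*
 * Sanity arithmetic (illustrative, not from the original source):
 * MEM_CHUNK1_COPY_PHYS works out to 0x0c0000, so after the copy the
 * two chunks occupy contiguous physical memory 0x000000 - 0x1fffff:
 * a 2MB image starting at physical zero, matching the SunOS loading
 * convention described above.
 */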

/* Maps memory for loading. */
u_long
sun2_map_mem_load(void)
{
	vaddr_t off;

	/* Map chunk zero for loading. */
	for (off = 0; off < MEM_CHUNK0_SIZE; off += NBSG)
		set_segmap(MEM_CHUNK0_LOAD_VIRT + off,
		    get_segmap(MEM_CHUNK0_LOAD_VIRT_PROM + off));

	/* Map chunk one for loading. */
	for (off = 0; off < MEM_CHUNK1_SIZE; off += NBSG)
		set_segmap(MEM_CHUNK1_LOAD_VIRT + off,
		    get_segmap(MEM_CHUNK1_LOAD_VIRT_PROM + off));

	/* Tell our caller where in virtual space to load. */
	return MEM_CHUNK0_LOAD_VIRT;
}

/* Remaps memory for running. */
void *
sun2_map_mem_run(void *entry)
{
	vaddr_t off, off_end;
	int sme;
	u_int pte;

	/* Chunk zero is already mapped and copied. */

	/* Chunk one needs to be mapped and copied. */
	pte = (get_pte(0) & ~PG_FRAME);
	for (off = 0; off < MEM_CHUNK1_SIZE; ) {

		/*
		 * We use the PMEG immediately before the
		 * segment we're copying in the PROM virtual
		 * mapping of the chunk.  If this is the first
		 * segment, this is the PMEG the PROM used to
		 * map 0x2b8000 virtual to 0x2b8000 physical,
		 * which I'll assume is unused.  For the second
		 * and subsequent segments, this will be the
		 * PMEG used to map the previous segment, which
		 * is now (since we already copied it) unused.
		 */
		sme = get_segmap((MEM_CHUNK1_LOAD_VIRT_PROM + off) - NBSG);
		set_segmap(MEM_CHUNK1_COPY_VIRT + off, sme);

		/* Set the PTEs in this new PMEG. */
		for (off_end = off + NBSG; off < off_end; off += NBPG)
			set_pte(MEM_CHUNK1_COPY_VIRT + off,
			    pte | PA_PGNUM(MEM_CHUNK1_COPY_PHYS + off));

		/* Copy this segment. */
		memcpy((void *)(MEM_CHUNK1_COPY_VIRT + (off - NBSG)),
		    (void *)(MEM_CHUNK1_LOAD_VIRT + (off - NBSG)),
		    NBSG);
	}

	/* Tell our caller where in virtual space to enter. */
	return (void *)((char *)entry - MEM_CHUNK0_LOAD_VIRT);
}
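
/*
 * Illustrative first iteration of the copy loop above (addresses
 * derived from the macros, with NBSG = 0x8000): the PMEG that mapped
 * PROM virtual 0x2b8000 is installed at copy virtual 0x0c0000, its
 * PTEs are rewritten to point at physical 0x0c0000, and then the
 * 32KB segment is copied down from load virtual 0x4c0000.
 */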

void
sun2_init(void)
{
	/* Set the function pointers. */
	dev_mapin_p   = dev2_mapin;
	dvma_alloc_p  = dvma2_alloc;
	dvma_free_p   = dvma2_free;
	dvma_mapin_p  = dvma2_mapin;
	dvma_mapout_p = dvma2_mapout;

	/* Prepare DVMA segment. */
	dvma2_init();
}