arm32_machdep.c revision 1.99 1 /* $NetBSD: arm32_machdep.c,v 1.99 2014/01/11 17:32:20 matt Exp $ */
2
3 /*
4 * Copyright (c) 1994-1998 Mark Brinicombe.
5 * Copyright (c) 1994 Brini.
6 * All rights reserved.
7 *
8 * This code is derived from software written for Brini by Mark Brinicombe
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by Mark Brinicombe
21 * for the NetBSD Project.
22 * 4. The name of the company nor the name of the author may be used to
23 * endorse or promote products derived from this software without specific
24 * prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
27 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
29 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
30 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * Machine dependent functions for kernel setup
39 *
40 * Created : 17/09/94
41 * Updated : 18/04/01 updated for new wscons
42 */
43
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.99 2014/01/11 17:32:20 matt Exp $");
46
47 #include "opt_modular.h"
48 #include "opt_md.h"
49 #include "opt_pmap_debug.h"
50
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/reboot.h>
54 #include <sys/proc.h>
55 #include <sys/kauth.h>
56 #include <sys/kernel.h>
57 #include <sys/mbuf.h>
58 #include <sys/mount.h>
59 #include <sys/buf.h>
60 #include <sys/msgbuf.h>
61 #include <sys/device.h>
62 #include <sys/sysctl.h>
63 #include <sys/cpu.h>
64 #include <sys/intr.h>
65 #include <sys/module.h>
66 #include <sys/atomic.h>
67 #include <sys/xcall.h>
68
69 #include <uvm/uvm_extern.h>
70
71 #include <dev/cons.h>
72 #include <dev/mm.h>
73
74 #include <arm/locore.h>
75
76 #include <arm/arm32/katelib.h>
77 #include <arm/arm32/machdep.h>
78
79 #include <machine/bootconfig.h>
80 #include <machine/pcb.h>
81
82 void (*cpu_reset_address)(void); /* Used by locore */
83 paddr_t cpu_reset_address_paddr; /* Used by locore */
84
85 struct vm_map *phys_map = NULL;
86
87 #if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
88 extern size_t md_root_size; /* Memory disc size */
89 #endif /* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */
90
91 pv_addr_t kernelstack;
92 pv_addr_t abtstack;
93 pv_addr_t fiqstack;
94 pv_addr_t irqstack;
95 pv_addr_t undstack;
96 pv_addr_t idlestack;
97
98 void * msgbufaddr;
99 extern paddr_t msgbufphys;
100
101 int kernel_debug = 0;
102 int cpu_printfataltraps = 0;
103 int cpu_fpu_present;
104 int cpu_hwdiv_present;
105 int cpu_neon_present;
106 int cpu_simd_present;
107 int cpu_simdex_present;
108 int cpu_umull_present;
109 const char *cpu_arch = "";
110
111 int cpu_instruction_set_attributes[6];
112 int cpu_memory_model_features[4];
113 int cpu_processor_features[2];
114 int cpu_media_and_vfp_features[2];
115
116 /* exported variable to be filled in by the bootloaders */
117 char *booted_kernel;
118
119 /* Prototypes */
120
121 void data_abort_handler(trapframe_t *frame);
122 void prefetch_abort_handler(trapframe_t *frame);
123 extern void configure(void);
124
125 /*
126 * arm32_vector_init:
127 *
128 * Initialize the vector page, and select whether or not to
129 * relocate the vectors.
130 *
131 * NOTE: We expect the vector page to be mapped at its expected
132 * destination.
133 */
/*
 * Install the CPU exception vectors at 'va' for the vectors selected
 * by the 'which' bitmask (one bit per ARM exception vector).
 *
 * On CPUs with VBAR (security extensions), the in-.text copy of the
 * vector page (page0rel) is used directly and nothing is copied.
 * Otherwise the vector instructions and their data words are copied
 * from page0/page0_data into the page at 'va', and high-vector
 * relocation is enabled when va == ARM_VECTORS_HIGH.
 */
void
arm32_vector_init(vaddr_t va, int which)
{
#if defined(CPU_ARMV7) || defined(CPU_ARM11) || defined(ARM_HAS_VBAR)
	/*
	 * If this processor has the security extension, don't bother
	 * to move/map the vector page.  Simply point VBAR to the copy
	 * that exists in the .text segment.
	 */
#ifndef ARM_HAS_VBAR
	if (va == ARM_VECTORS_LOW
	    && (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0) {
#endif
		extern const uint32_t page0rel[];
		vector_page = (vaddr_t)page0rel;
		/* VBAR requires 32-byte alignment. */
		KASSERT((vector_page & 0x1f) == 0);
		armreg_vbar_write(vector_page);
#ifdef VERBOSE_INIT_ARM
		printf(" vbar=%p", page0rel);
#endif
		/* Make sure vector relocation (high vectors) is off. */
		cpu_control(CPU_CONTROL_VECRELOC, 0);
		return;
#ifndef ARM_HAS_VBAR
	}
#endif
#endif
#ifndef ARM_HAS_VBAR
	/* Only the boot CPU copies the vectors; the page is shared. */
	if (CPU_IS_PRIMARY(curcpu())) {
		extern unsigned int page0[], page0_data[];
		unsigned int *vectors = (int *) va;
		unsigned int *vectors_data = vectors + (page0_data - page0);
		int vec;

		/*
		 * Loop through the vectors we're taking over, and copy the
		 * vector's insn and data word.
		 */
		for (vec = 0; vec < ARM_NVEC; vec++) {
			if ((which & (1 << vec)) == 0) {
				/* Don't want to take over this vector. */
				continue;
			}
			vectors[vec] = page0[vec];
			vectors_data[vec] = page0_data[vec];
		}

		/* Now sync the vectors. */
		cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));

		vector_page = va;
	}

	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * Think ddb(9) ...
		 *
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		 * rethink this.
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
#endif
}
207
208 /*
209 * Debug function just to park the CPU
210 */
211
/*
 * Park the calling CPU forever: sleep, and go right back to sleep on
 * every wakeup.  Never returns.
 */
void
halt(void)
{
	for (;;)
		cpu_sleep(0);
}
218
219
220 /* Sync the discs, unmount the filesystems, and adjust the todr */
221
222 void
223 bootsync(void)
224 {
225 static bool bootsyncdone = false;
226
227 if (bootsyncdone) return;
228
229 bootsyncdone = true;
230
231 /* Make sure we can still manage to do things */
232 if (GetCPSR() & I32_bit) {
233 /*
234 * If we get here then boot has been called without RB_NOSYNC
235 * and interrupts were disabled. This means the boot() call
236 * did not come from a user process e.g. shutdown, but must
237 * have come from somewhere in the kernel.
238 */
239 IRQenable;
240 printf("Warning IRQ's disabled during boot()\n");
241 }
242
243 vfs_shutdown();
244
245 resettodr();
246 }
247
248 /*
249 * void cpu_startup(void)
250 *
251 * Machine dependent startup code.
252 *
253 */
/*
 * Machine dependent startup code: finish CPU setup, map and enable the
 * kernel message buffer, print the memory summary, create the physio
 * submap, and initialize lwp0's kernel stack pointer and trapframe.
 * The ordering of these steps matters; see the inline comments.
 */
void
cpu_startup(void)
{
	vaddr_t minaddr;
	vaddr_t maxaddr;
	u_int loop;
	char pbuf[9];

	/*
	 * Until we better locking, we have to live under the kernel lock.
	 */
	//KERNEL_LOCK(1, NULL);

	/* Set the CPU control register */
	cpu_setup(boot_args);

#ifndef ARM_HAS_VBAR
	/* Lock down zero page */
	vector_page_setprot(VM_PROT_READ);
#endif

	/*
	 * Give pmap a chance to set up a few more things now the vm
	 * is initialised
	 */
	pmap_postinit();

	/*
	 * Initialize error message buffer (at end of core).
	 */

	/* msgbufphys was setup during the secondary boot strap */
	for (loop = 0; loop < btoc(MSGBUFSIZE); ++loop)
		pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE,
		    msgbufphys + loop * PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, 0);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

	/*
	 * Identify ourselves for the msgbuf (everything printed earlier will
	 * not be buffered).
	 */
	printf("%s%s", copyright, version);

	format_bytes(pbuf, sizeof(pbuf), arm_ptob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

	/*
	 * Point lwp0's kernel stack at the top of its uarea and place
	 * its trapframe immediately below the stack top.
	 */
	struct lwp * const l = &lwp0;
	struct pcb * const pcb = lwp_getpcb(l);
	pcb->pcb_ksp = uvm_lwp_getuarea(l) + USPACE_SVC_STACK_TOP;
	lwp_settrapframe(l, (struct trapframe *)pcb->pcb_ksp - 1);
}
318
319 /*
320 * machine dependent system variables.
321 */
322 static int
323 sysctl_machdep_booted_device(SYSCTLFN_ARGS)
324 {
325 struct sysctlnode node;
326
327 if (booted_device == NULL)
328 return (EOPNOTSUPP);
329
330 node = *rnode;
331 node.sysctl_data = __UNCONST(device_xname(booted_device));
332 node.sysctl_size = strlen(device_xname(booted_device)) + 1;
333 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
334 }
335
336 static int
337 sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
338 {
339 struct sysctlnode node;
340
341 if (booted_kernel == NULL || booted_kernel[0] == '\0')
342 return (EOPNOTSUPP);
343
344 node = *rnode;
345 node.sysctl_data = booted_kernel;
346 node.sysctl_size = strlen(booted_kernel) + 1;
347 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
348 }
349
350 static int
351 sysctl_machdep_cpu_arch(SYSCTLFN_ARGS)
352 {
353 struct sysctlnode node = *rnode;
354 node.sysctl_data = __UNCONST(cpu_arch);
355 node.sysctl_size = strlen(cpu_arch) + 1;
356 return sysctl_lookup(SYSCTLFN_CALL(&node));
357 }
358
359 static int
360 sysctl_machdep_powersave(SYSCTLFN_ARGS)
361 {
362 struct sysctlnode node = *rnode;
363 int error, newval;
364
365 newval = cpu_do_powersave;
366 node.sysctl_data = &newval;
367 if (cpufuncs.cf_sleep == (void *) cpufunc_nullop)
368 node.sysctl_flags &= ~CTLFLAG_READWRITE;
369 error = sysctl_lookup(SYSCTLFN_CALL(&node));
370 if (error || newp == NULL || newval == cpu_do_powersave)
371 return (error);
372
373 if (newval < 0 || newval > 1)
374 return (EINVAL);
375 cpu_do_powersave = newval;
376
377 return (0);
378 }
379
380 static int
381 sysctl_hw_machine_arch(SYSCTLFN_ARGS)
382 {
383 struct sysctlnode node = *rnode;
384 node.sysctl_data = l->l_proc->p_md.md_march;
385 node.sysctl_size = strlen(l->l_proc->p_md.md_march) + 1;
386 return sysctl_lookup(SYSCTLFN_CALL(&node));
387 }
388
/*
 * Create the machdep sysctl subtree: debug/boot information, the
 * powersave knob, CPU/FPU identification, and the various feature
 * ID registers exported as read-only nodes.  Also overrides
 * hw.machine_arch with a per-process handler.
 */
SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "machdep", NULL,
	    NULL, 0, NULL, 0,
	    CTL_MACHDEP, CTL_EOL);

	/* Writable debug knob backed directly by kernel_debug. */
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "debug", NULL,
	    NULL, 0, &kernel_debug, 0,
	    CTL_MACHDEP, CPU_DEBUG, CTL_EOL);
	/* Boot information supplied by the bootloader/autoconf. */
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "booted_device", NULL,
	    sysctl_machdep_booted_device, 0, NULL, 0,
	    CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "booted_kernel", NULL,
	    sysctl_machdep_booted_kernel, 0, NULL, 0,
	    CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "console_device", NULL,
	    sysctl_consdev, 0, NULL, sizeof(dev_t),
	    CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRING, "cpu_arch", NULL,
	    sysctl_machdep_cpu_arch, 0, NULL, 0,
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "powersave", NULL,
	    sysctl_machdep_powersave, 0, &cpu_do_powersave, 0,
	    CTL_MACHDEP, CPU_POWERSAVE, CTL_EOL);
	/* cpu_id is a snapshot taken now, hence CTLFLAG_IMMEDIATE. */
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
	    CTLTYPE_INT, "cpu_id", NULL,
	    NULL, curcpu()->ci_arm_cpuid, NULL, 0,
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);
#ifdef FPU_VFP
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_INT, "fpu_id", NULL,
	    NULL, 0, &cpu_info_store.ci_vfp_id, 0,
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);
#endif
	/* CPU feature-presence flags filled in during CPU attach. */
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_INT, "fpu_present", NULL,
	    NULL, 0, &cpu_fpu_present, 0,
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_INT, "hwdiv_present", NULL,
	    NULL, 0, &cpu_hwdiv_present, 0,
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_INT, "neon_present", NULL,
	    NULL, 0, &cpu_neon_present, 0,
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	/* Raw CP15 ID register blocks, exported as structs. */
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_STRUCT, "id_isar", NULL,
	    NULL, 0,
	    cpu_instruction_set_attributes,
	    sizeof(cpu_instruction_set_attributes),
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_STRUCT, "id_mmfr", NULL,
	    NULL, 0,
	    cpu_memory_model_features,
	    sizeof(cpu_memory_model_features),
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_STRUCT, "id_pfr", NULL,
	    NULL, 0,
	    cpu_processor_features,
	    sizeof(cpu_processor_features),
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_STRUCT, "id_mvfr", NULL,
	    NULL, 0,
	    cpu_media_and_vfp_features,
	    sizeof(cpu_media_and_vfp_features),
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_INT, "simd_present", NULL,
	    NULL, 0, &cpu_simd_present, 0,
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_INT, "simdex_present", NULL,
	    NULL, 0, &cpu_simdex_present, 0,
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "printfataltraps", NULL,
	    NULL, 0, &cpu_printfataltraps, 0,
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);


	/*
	 * We need to override the usual CTL_HW HW_MACHINE_ARCH so we
	 * return the right machine_arch based on the running executable.
	 */
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "hw", NULL,
	    NULL, 0, NULL, 0,
	    CTL_HW, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_STRING, "machine_arch",
	    SYSCTL_DESCR("Machine CPU class"),
	    sysctl_hw_machine_arch, 0, NULL, 0,
	    CTL_HW, HW_MACHINE_ARCH, CTL_EOL);
}
516
517 void
518 parse_mi_bootargs(char *args)
519 {
520 int integer;
521
522 if (get_bootconf_option(args, "single", BOOTOPT_TYPE_BOOLEAN, &integer)
523 || get_bootconf_option(args, "-s", BOOTOPT_TYPE_BOOLEAN, &integer))
524 if (integer)
525 boothowto |= RB_SINGLE;
526 if (get_bootconf_option(args, "kdb", BOOTOPT_TYPE_BOOLEAN, &integer)
527 || get_bootconf_option(args, "-k", BOOTOPT_TYPE_BOOLEAN, &integer)
528 || get_bootconf_option(args, "-d", BOOTOPT_TYPE_BOOLEAN, &integer))
529 if (integer)
530 boothowto |= RB_KDB;
531 if (get_bootconf_option(args, "ask", BOOTOPT_TYPE_BOOLEAN, &integer)
532 || get_bootconf_option(args, "-a", BOOTOPT_TYPE_BOOLEAN, &integer))
533 if (integer)
534 boothowto |= RB_ASKNAME;
535
536 #ifdef PMAP_DEBUG
537 if (get_bootconf_option(args, "pmapdebug", BOOTOPT_TYPE_INT, &integer)) {
538 pmap_debug_level = integer;
539 pmap_debug(pmap_debug_level);
540 }
541 #endif /* PMAP_DEBUG */
542
543 /* if (get_bootconf_option(args, "nbuf", BOOTOPT_TYPE_INT, &integer))
544 bufpages = integer;*/
545
546 #if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
547 if (get_bootconf_option(args, "memorydisc", BOOTOPT_TYPE_INT, &integer)
548 || get_bootconf_option(args, "memorydisk", BOOTOPT_TYPE_INT, &integer)) {
549 md_root_size = integer;
550 md_root_size *= 1024;
551 if (md_root_size < 32*1024)
552 md_root_size = 32*1024;
553 if (md_root_size > 2048*1024)
554 md_root_size = 2048*1024;
555 }
556 #endif /* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */
557
558 if (get_bootconf_option(args, "quiet", BOOTOPT_TYPE_BOOLEAN, &integer)
559 || get_bootconf_option(args, "-q", BOOTOPT_TYPE_BOOLEAN, &integer))
560 if (integer)
561 boothowto |= AB_QUIET;
562 if (get_bootconf_option(args, "verbose", BOOTOPT_TYPE_BOOLEAN, &integer)
563 || get_bootconf_option(args, "-v", BOOTOPT_TYPE_BOOLEAN, &integer))
564 if (integer)
565 boothowto |= AB_VERBOSE;
566 }
567
568 #ifdef __HAVE_FAST_SOFTINTS
569 #if IPL_SOFTSERIAL != IPL_SOFTNET + 1
570 #error IPLs are screwed up
571 #elif IPL_SOFTNET != IPL_SOFTBIO + 1
572 #error IPLs are screwed up
573 #elif IPL_SOFTBIO != IPL_SOFTCLOCK + 1
574 #error IPLs are screwed up
575 #elif !(IPL_SOFTCLOCK > IPL_NONE)
576 #error IPLs are screwed up
577 #elif (IPL_NONE != 0)
578 #error IPLs are screwed up
579 #endif
580
581 #ifndef __HAVE_PIC_FAST_SOFTINTS
582 #define SOFTINT2IPLMAP \
583 (((IPL_SOFTSERIAL - IPL_SOFTCLOCK) << (SOFTINT_SERIAL * 4)) | \
584 ((IPL_SOFTNET - IPL_SOFTCLOCK) << (SOFTINT_NET * 4)) | \
585 ((IPL_SOFTBIO - IPL_SOFTCLOCK) << (SOFTINT_BIO * 4)) | \
586 ((IPL_SOFTCLOCK - IPL_SOFTCLOCK) << (SOFTINT_CLOCK * 4)))
587 #define SOFTINT2IPL(l) ((SOFTINT2IPLMAP >> ((l) * 4)) & 0x0f)
588
589 /*
590 * This returns a mask of softint IPLs that be dispatch at <ipl>
591 * SOFTIPLMASK(IPL_NONE) = 0x0000000f
592 * SOFTIPLMASK(IPL_SOFTCLOCK) = 0x0000000e
593 * SOFTIPLMASK(IPL_SOFTBIO) = 0x0000000c
594 * SOFTIPLMASK(IPL_SOFTNET) = 0x00000008
595 * SOFTIPLMASK(IPL_SOFTSERIAL) = 0x00000000
596 */
597 #define SOFTIPLMASK(ipl) ((0x0f << (ipl)) & 0x0f)
598
599 void softint_switch(lwp_t *, int);
600
601 void
602 softint_trigger(uintptr_t mask)
603 {
604 curcpu()->ci_softints |= mask;
605 }
606
/*
 * Record 'l' as the softint lwp for 'level' on its CPU and compute the
 * machdep cookie: a single-bit mask positioned by the softint level's
 * IPL relative to IPL_SOFTCLOCK (matching the bits dosoftints scans).
 */
void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
{
	lwp_t ** lp = &l->l_cpu->ci_softlwps[level];
	KASSERT(*lp == NULL || *lp == l);
	*lp = l;
	*machdep = 1 << SOFTINT2IPL(level);
	/* Cross-check the SOFTINT2IPL mapping for each defined level. */
	KASSERT(level != SOFTINT_CLOCK || *machdep == (1 << (IPL_SOFTCLOCK - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_BIO || *machdep == (1 << (IPL_SOFTBIO - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_NET || *machdep == (1 << (IPL_SOFTNET - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_SERIAL || *machdep == (1 << (IPL_SOFTSERIAL - IPL_SOFTCLOCK)));
}
619
/*
 * Dispatch all pending soft interrupts whose IPL is above the level at
 * which this CPU was running ('opl').  Runs at splhigh(); each pending
 * level is cleared and handed to softint_switch() in priority order
 * (SERIAL first, CLOCK last) until nothing dispatchable remains, then
 * the original IPL is restored.
 */
void
dosoftints(void)
{
	struct cpu_info * const ci = curcpu();
	const int opl = ci->ci_cpl;
	/* Bits for softint IPLs that may run above the entry IPL. */
	const uint32_t softiplmask = SOFTIPLMASK(opl);

	splhigh();
	for (;;) {
		u_int softints = ci->ci_softints & softiplmask;
		KASSERT((softints != 0) == ((ci->ci_softints >> opl) != 0));
		KASSERT(opl == IPL_NONE || (softints & (1 << (opl - IPL_SOFTCLOCK))) == 0);
		if (softints == 0) {
			/* Nothing left to dispatch: drop back to opl. */
			splx(opl);
			return;
		}
		/*
		 * Clear level 'n' from the pending set and switch to
		 * its softint lwp; 'continue' rescans from the top so
		 * higher-priority softints raised meanwhile run first.
		 */
#define	DOSOFTINT(n) \
		if (ci->ci_softints & (1 << (IPL_SOFT ## n - IPL_SOFTCLOCK))) { \
			ci->ci_softints &= \
			    ~(1 << (IPL_SOFT ## n - IPL_SOFTCLOCK)); \
			softint_switch(ci->ci_softlwps[SOFTINT_ ## n], \
			    IPL_SOFT ## n); \
			continue; \
		}
		DOSOFTINT(SERIAL);
		DOSOFTINT(NET);
		DOSOFTINT(BIO);
		DOSOFTINT(CLOCK);
		panic("dosoftints wtf (softints=%u?, ipl=%d)", softints, opl);
	}
}
651 #endif /* !__HAVE_PIC_FAST_SOFTINTS */
652 #endif /* __HAVE_FAST_SOFTINTS */
653
654 #ifdef MODULAR
655 /*
656 * Push any modules loaded by the boot loader.
657 */
/*
 * Push any modules loaded by the boot loader into the module
 * subsystem.  Nothing to do on this platform: the arm32 bootloaders
 * do not preload modules.
 */
void
module_init_md(void)
{
}
662 #endif /* MODULAR */
663
664 int
665 mm_md_physacc(paddr_t pa, vm_prot_t prot)
666 {
667
668 return (pa < ctob(physmem)) ? 0 : EFAULT;
669 }
670
671 #ifdef __HAVE_CPU_UAREA_ALLOC_IDLELWP
672 vaddr_t
673 cpu_uarea_alloc_idlelwp(struct cpu_info *ci)
674 {
675 const vaddr_t va = idlestack.pv_va + ci->ci_cpuid * USPACE;
676 // printf("%s: %s: va=%lx\n", __func__, ci->ci_data.cpu_name, va);
677 return va;
678 }
679 #endif
680
681 #ifdef MULTIPROCESSOR
/*
 * Release the secondary CPUs: publish the set of attached CPUs into
 * the arm_cpu_mbox mailbox that the secondaries spin on, make the
 * store globally visible, and (on ARMv7) issue SEV to wake any CPUs
 * parked in WFE.
 */
void
cpu_boot_secondary_processors(void)
{
	uint32_t mbox;
	kcpuset_export_u32(kcpuset_attached, &mbox, sizeof(mbox));
	atomic_swap_32(&arm_cpu_mbox, mbox);
	/* Ensure the mailbox write is visible before waking anyone. */
	membar_producer();
#ifdef _ARM_ARCH_7
	__asm __volatile("sev; sev; sev");
#endif
}
693
694 void
695 xc_send_ipi(struct cpu_info *ci)
696 {
697 KASSERT(kpreempt_disabled());
698 KASSERT(curcpu() != ci);
699
700
701 if (ci) {
702 /* Unicast, remote CPU */
703 printf("%s: -> %s", __func__, ci->ci_data.cpu_name);
704 intr_ipi_send(ci->ci_kcpuset, IPI_XCALL);
705 } else {
706 printf("%s: -> !%s", __func__, ci->ci_data.cpu_name);
707 /* Broadcast to all but ourselves */
708 kcpuset_t *kcp;
709 kcpuset_create(&kcp, (ci != NULL));
710 KASSERT(kcp != NULL);
711 kcpuset_copy(kcp, kcpuset_running);
712 kcpuset_clear(kcp, cpu_index(ci));
713 intr_ipi_send(kcp, IPI_XCALL);
714 kcpuset_destroy(kcp);
715 }
716 printf("\n");
717 }
718 #endif /* MULTIPROCESSOR */
719
720 #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
721 bool
722 mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
723 {
724 if (physical_start <= pa && pa < physical_end) {
725 *vap = KERNEL_BASE + (pa - physical_start);
726 return true;
727 }
728
729 return false;
730 }
731 #endif
732