/*	$NetBSD: arm32_machdep.c,v 1.83.2.3 2013/06/23 06:19:59 tls Exp $	*/

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup
 *
 * Created      : 17/09/94
 * Updated      : 18/04/01 updated for new wscons
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.83.2.3 2013/06/23 06:19:59 tls Exp $");

#include "opt_modular.h"
#include "opt_md.h"
#include "opt_pmap_debug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/reboot.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/msgbuf.h>
#include <sys/device.h>
#include <sys/sysctl.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/module.h>
#include <sys/atomic.h>
#include <sys/xcall.h>

#include <uvm/uvm_extern.h>

#include <dev/cons.h>
#include <dev/mm.h>

#include <arm/arm32/katelib.h>
#include <arm/arm32/machdep.h>

#include <machine/bootconfig.h>
#include <machine/pcb.h>

void (*cpu_reset_address)(void);	/* Used by locore */
paddr_t cpu_reset_address_paddr;	/* Used by locore */

struct vm_map *phys_map = NULL;

#if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
extern size_t md_root_size;		/* Memory disc size */
#endif	/* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */

pv_addr_t kernelstack;
pv_addr_t abtstack;
pv_addr_t fiqstack;
pv_addr_t irqstack;
pv_addr_t undstack;
pv_addr_t idlestack;

void *msgbufaddr;
extern paddr_t msgbufphys;

int kernel_debug = 0;
int cpu_fpu_present;
int cpu_neon_present;
int cpu_simd_present;
int cpu_simdex_present;
int cpu_umull_present;
const char *cpu_arch = "";

int cpu_instruction_set_attributes[6];
int cpu_memory_model_features[4];
int cpu_processor_features[2];
int cpu_media_and_vfp_features[2];

/* exported variable to be filled in by the bootloaders */
char *booted_kernel;

/* Prototypes */

void data_abort_handler(trapframe_t *frame);
void prefetch_abort_handler(trapframe_t *frame);
extern void configure(void);

/*
 * arm32_vector_init:
 *
 *	Initialize the vector page, and select whether or not to
 *	relocate the vectors.
 *
 *	NOTE: We expect the vector page to be mapped at its expected
 *	destination.
 */
void
arm32_vector_init(vaddr_t va, int which)
{
#if defined(CPU_ARMV7) || defined(CPU_ARM11) || defined(ARM_HAS_VBAR)
	/*
	 * If this processor has the security extension, don't bother
	 * to move/map the vector page.  Simply point VBAR to the copy
	 * that exists in the .text segment.
	 */
#ifndef ARM_HAS_VBAR
	if (va == ARM_VECTORS_LOW
	    && (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0) {
#endif
		extern const uint32_t page0rel[];
		vector_page = (vaddr_t)page0rel;
		KASSERT((vector_page & 0x1f) == 0);
		armreg_vbar_write(vector_page);
#ifdef VERBOSE_INIT_ARM
		printf(" vbar=%p", page0rel);
#endif
		cpu_control(CPU_CONTROL_VECRELOC, 0);
		return;
#ifndef ARM_HAS_VBAR
	}
#endif
#endif
#ifndef ARM_HAS_VBAR
	if (CPU_IS_PRIMARY(curcpu())) {
		extern unsigned int page0[], page0_data[];
		unsigned int *vectors = (unsigned int *) va;
		unsigned int *vectors_data = vectors + (page0_data - page0);
		int vec;

		/*
		 * Loop through the vectors we're taking over, and copy the
		 * vector's insn and data word.
		 */
		for (vec = 0; vec < ARM_NVEC; vec++) {
			if ((which & (1 << vec)) == 0) {
				/* Don't want to take over this vector. */
				continue;
			}
			vectors[vec] = page0[vec];
			vectors_data[vec] = page0_data[vec];
		}

		/* Now sync the vectors. */
		cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));

		vector_page = va;
	}

	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * Think ddb(9) ...
		 *
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		 * rethink this.
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
#endif
}
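
/*
 * Typical use: a port's initarm() maps the vector page and then calls,
 * for example (illustrative only; the exact arguments are port-specific):
 *
 *	arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
 */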

/*
 * Debug function just to park the CPU
 */

void
halt(void)
{
	while (1)
		cpu_sleep(0);
}


/* Sync the discs, unmount the filesystems, and adjust the todr */

void
bootsync(void)
{
	static bool bootsyncdone = false;

	if (bootsyncdone) return;

	bootsyncdone = true;

	/* Make sure we can still manage to do things */
	if (GetCPSR() & I32_bit) {
		/*
		 * If we get here then boot has been called without RB_NOSYNC
		 * and interrupts were disabled.  This means the boot() call
		 * did not come from a user process, e.g. shutdown, but must
		 * have come from somewhere in the kernel.
		 */
		IRQenable;
		printf("Warning: IRQs disabled during boot()\n");
	}

	vfs_shutdown();

	resettodr();
}

/*
 * void cpu_startup(void)
 *
 * Machine dependent startup code.
 *
 */
void
cpu_startup(void)
{
	vaddr_t minaddr;
	vaddr_t maxaddr;
	u_int loop;
	char pbuf[9];

	/*
	 * Until we have better locking, we have to live under the kernel lock.
	 */
	//KERNEL_LOCK(1, NULL);

	/* Set the CPU control register */
	cpu_setup(boot_args);

#ifndef ARM_HAS_VBAR
	/* Lock down zero page */
	vector_page_setprot(VM_PROT_READ);
#endif

	/*
	 * Give pmap a chance to set up a few more things now the vm
	 * is initialised
	 */
	pmap_postinit();

	/*
	 * Initialize error message buffer (at end of core).
	 */

	/* msgbufphys was setup during the secondary boot strap */
	for (loop = 0; loop < btoc(MSGBUFSIZE); ++loop)
		pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE,
		    msgbufphys + loop * PAGE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, 0);
	pmap_update(pmap_kernel());
	initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));

	/*
	 * Identify ourselves for the msgbuf (everything printed earlier will
	 * not be buffered).
	 */
	printf("%s%s", copyright, version);

	format_bytes(pbuf, sizeof(pbuf), arm_ptob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

	struct lwp * const l = &lwp0;
	struct pcb * const pcb = lwp_getpcb(l);
	pcb->pcb_ksp = uvm_lwp_getuarea(l) + USPACE_SVC_STACK_TOP;
	lwp_settrapframe(l, (struct trapframe *)pcb->pcb_ksp - 1);
}

/*
 * machine dependent system variables.
 */
static int
sysctl_machdep_booted_device(SYSCTLFN_ARGS)
{
	struct sysctlnode node;

	if (booted_device == NULL)
		return (EOPNOTSUPP);

	node = *rnode;
	node.sysctl_data = __UNCONST(device_xname(booted_device));
	node.sysctl_size = strlen(device_xname(booted_device)) + 1;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

static int
sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
{
	struct sysctlnode node;

	if (booted_kernel == NULL || booted_kernel[0] == '\0')
		return (EOPNOTSUPP);

	node = *rnode;
	node.sysctl_data = booted_kernel;
	node.sysctl_size = strlen(booted_kernel) + 1;
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

static int
sysctl_machdep_cpu_arch(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	node.sysctl_data = __UNCONST(cpu_arch);
	node.sysctl_size = strlen(cpu_arch) + 1;
	return sysctl_lookup(SYSCTLFN_CALL(&node));
}

static int
sysctl_machdep_powersave(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	int error, newval;

	newval = cpu_do_powersave;
	node.sysctl_data = &newval;
	if (cpufuncs.cf_sleep == (void *) cpufunc_nullop)
		node.sysctl_flags &= ~CTLFLAG_READWRITE;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL || newval == cpu_do_powersave)
		return (error);

	if (newval < 0 || newval > 1)
		return (EINVAL);
	cpu_do_powersave = newval;

	return (0);
}

SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "machdep", NULL,
		NULL, 0, NULL, 0,
		CTL_MACHDEP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "debug", NULL,
		NULL, 0, &kernel_debug, 0,
		CTL_MACHDEP, CPU_DEBUG, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "booted_device", NULL,
		sysctl_machdep_booted_device, 0, NULL, 0,
		CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "booted_kernel", NULL,
		sysctl_machdep_booted_kernel, 0, NULL, 0,
		CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRUCT, "console_device", NULL,
		sysctl_consdev, 0, NULL, sizeof(dev_t),
		CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_STRING, "cpu_arch", NULL,
		sysctl_machdep_cpu_arch, 0, NULL, 0,
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "powersave", NULL,
		sysctl_machdep_powersave, 0, &cpu_do_powersave, 0,
		CTL_MACHDEP, CPU_POWERSAVE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		CTLTYPE_INT, "cpu_id", NULL,
		NULL, curcpu()->ci_arm_cpuid, NULL, 0,
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
#ifdef FPU_VFP
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_INT, "fpu_id", NULL,
		NULL, 0, &cpu_info_store.ci_vfp_id, 0,
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
#endif
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_INT, "fpu_present", NULL,
		NULL, 0, &cpu_fpu_present, 0,
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_INT, "neon_present", NULL,
		NULL, 0, &cpu_neon_present, 0,
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_STRUCT, "id_isar", NULL,
		NULL, 0,
		cpu_instruction_set_attributes,
		sizeof(cpu_instruction_set_attributes),
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_STRUCT, "id_mmfr", NULL,
		NULL, 0,
		cpu_memory_model_features,
		sizeof(cpu_memory_model_features),
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_STRUCT, "id_pfr", NULL,
		NULL, 0,
		cpu_processor_features,
		sizeof(cpu_processor_features),
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_STRUCT, "id_mvfr", NULL,
		NULL, 0,
		cpu_media_and_vfp_features,
		sizeof(cpu_media_and_vfp_features),
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_INT, "simd_present", NULL,
		NULL, 0, &cpu_simd_present, 0,
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		CTLTYPE_INT, "simdex_present", NULL,
		NULL, 0, &cpu_simdex_present, 0,
		CTL_MACHDEP, CTL_CREATE, CTL_EOL);
}
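
/*
 * These nodes appear under "machdep" in the sysctl tree; for example
 * (an illustrative transcript, not captured from a real system):
 *
 *	$ sysctl machdep.cpu_arch
 *	machdep.cpu_arch = 7
 *	# sysctl -w machdep.powersave=1
 */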

void
parse_mi_bootargs(char *args)
{
	int integer;

	if (get_bootconf_option(args, "single", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-s", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_SINGLE;
	if (get_bootconf_option(args, "kdb", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-k", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-d", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_KDB;
	if (get_bootconf_option(args, "ask", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-a", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= RB_ASKNAME;

#ifdef PMAP_DEBUG
	if (get_bootconf_option(args, "pmapdebug", BOOTOPT_TYPE_INT, &integer)) {
		pmap_debug_level = integer;
		pmap_debug(pmap_debug_level);
	}
#endif	/* PMAP_DEBUG */

/*	if (get_bootconf_option(args, "nbuf", BOOTOPT_TYPE_INT, &integer))
		bufpages = integer;*/

#if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
	if (get_bootconf_option(args, "memorydisc", BOOTOPT_TYPE_INT, &integer)
	    || get_bootconf_option(args, "memorydisk", BOOTOPT_TYPE_INT, &integer)) {
		md_root_size = integer;
		md_root_size *= 1024;
		if (md_root_size < 32*1024)
			md_root_size = 32*1024;
		if (md_root_size > 2048*1024)
			md_root_size = 2048*1024;
	}
#endif	/* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */

	if (get_bootconf_option(args, "quiet", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-q", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= AB_QUIET;
	if (get_bootconf_option(args, "verbose", BOOTOPT_TYPE_BOOLEAN, &integer)
	    || get_bootconf_option(args, "-v", BOOTOPT_TYPE_BOOLEAN, &integer))
		if (integer)
			boothowto |= AB_VERBOSE;
}
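
/*
 * Example boot argument strings this parser accepts (illustrative only;
 * the actual string is whatever the bootloader passed in):
 *
 *	"-s"			-> single user (RB_SINGLE)
 *	"-v memorydisk=2048"	-> verbose boot, 2MB memory disk root
 */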

#ifdef __HAVE_FAST_SOFTINTS
#if IPL_SOFTSERIAL != IPL_SOFTNET + 1
#error IPLs are screwed up
#elif IPL_SOFTNET != IPL_SOFTBIO + 1
#error IPLs are screwed up
#elif IPL_SOFTBIO != IPL_SOFTCLOCK + 1
#error IPLs are screwed up
#elif !(IPL_SOFTCLOCK > IPL_NONE)
#error IPLs are screwed up
#elif (IPL_NONE != 0)
#error IPLs are screwed up
#endif

#ifndef __HAVE_PIC_FAST_SOFTINTS
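/*
 * SOFTINT2IPLMAP packs, four bits per MI softint level (SOFTINT_*),
 * the offset of the corresponding IPL above IPL_SOFTCLOCK, so e.g.
 * SOFTINT2IPL(SOFTINT_NET) == IPL_SOFTNET - IPL_SOFTCLOCK.
 */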
#define	SOFTINT2IPLMAP \
	(((IPL_SOFTSERIAL - IPL_SOFTCLOCK) << (SOFTINT_SERIAL * 4)) | \
	 ((IPL_SOFTNET - IPL_SOFTCLOCK) << (SOFTINT_NET * 4)) | \
	 ((IPL_SOFTBIO - IPL_SOFTCLOCK) << (SOFTINT_BIO * 4)) | \
	 ((IPL_SOFTCLOCK - IPL_SOFTCLOCK) << (SOFTINT_CLOCK * 4)))
#define	SOFTINT2IPL(l)	((SOFTINT2IPLMAP >> ((l) * 4)) & 0x0f)

/*
 * This returns a mask of softint IPLs that may be dispatched at <ipl>.
 * SOFTIPLMASK(IPL_NONE)	= 0x0000000f
 * SOFTIPLMASK(IPL_SOFTCLOCK)	= 0x0000000e
 * SOFTIPLMASK(IPL_SOFTBIO)	= 0x0000000c
 * SOFTIPLMASK(IPL_SOFTNET)	= 0x00000008
 * SOFTIPLMASK(IPL_SOFTSERIAL)	= 0x00000000
 */
#define	SOFTIPLMASK(ipl)	((0x0f << (ipl)) & 0x0f)

void softint_switch(lwp_t *, int);

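/*
 * softint_trigger:
 *
 *	Mark a softint as pending on the current CPU; it will be
 *	dispatched by dosoftints() once the IPL drops far enough.
 */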
void
softint_trigger(uintptr_t mask)
{
	curcpu()->ci_softints |= mask;
}

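/*
 * softint_init_md:
 *
 *	Record the lwp that handles <level> softints on this CPU, and
 *	compute its per-CPU pending bit (one bit per softint IPL).
 */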
void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
{
	lwp_t ** lp = &l->l_cpu->ci_softlwps[level];
	KASSERT(*lp == NULL || *lp == l);
	*lp = l;
	*machdep = 1 << SOFTINT2IPL(level);
	KASSERT(level != SOFTINT_CLOCK || *machdep == (1 << (IPL_SOFTCLOCK - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_BIO || *machdep == (1 << (IPL_SOFTBIO - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_NET || *machdep == (1 << (IPL_SOFTNET - IPL_SOFTCLOCK)));
	KASSERT(level != SOFTINT_SERIAL || *machdep == (1 << (IPL_SOFTSERIAL - IPL_SOFTCLOCK)));
}

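/*
 * dosoftints:
 *
 *	Dispatch all pending softints that are unmasked at the current
 *	IPL, highest priority first, then restore the original IPL.
 */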
void
dosoftints(void)
{
	struct cpu_info * const ci = curcpu();
	const int opl = ci->ci_cpl;
	const uint32_t softiplmask = SOFTIPLMASK(opl);

	splhigh();
	for (;;) {
		u_int softints = ci->ci_softints & softiplmask;
		KASSERT((softints != 0) == ((ci->ci_softints >> opl) != 0));
		KASSERT(opl == IPL_NONE || (softints & (1 << (opl - IPL_SOFTCLOCK))) == 0);
		if (softints == 0) {
			splx(opl);
			return;
		}
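		/*
		 * Clear the pending bit for IPL_SOFT<n> and switch to
		 * that level's handler lwp; the "continue" rescans from
		 * the highest priority level again.
		 */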
#define	DOSOFTINT(n) \
		if (ci->ci_softints & (1 << (IPL_SOFT ## n - IPL_SOFTCLOCK))) { \
			ci->ci_softints &= \
			    ~(1 << (IPL_SOFT ## n - IPL_SOFTCLOCK)); \
			softint_switch(ci->ci_softlwps[SOFTINT_ ## n], \
			    IPL_SOFT ## n); \
			continue; \
		}
		DOSOFTINT(SERIAL);
		DOSOFTINT(NET);
		DOSOFTINT(BIO);
		DOSOFTINT(CLOCK);
		panic("dosoftints wtf (softints=%u?, ipl=%d)", softints, opl);
	}
}
#endif /* !__HAVE_PIC_FAST_SOFTINTS */
#endif /* __HAVE_FAST_SOFTINTS */

#ifdef MODULAR
/*
 * Push any modules loaded by the boot loader.
 */
void
module_init_md(void)
{
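	/* Nothing to do; no bootloader-supplied modules here. */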
}
#endif /* MODULAR */

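/*
 * mm_md_physacc:
 *
 *	Check whether an access to physical address <pa> (e.g. via
 *	/dev/mem) is allowed; only addresses backed by RAM are.
 */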
int
mm_md_physacc(paddr_t pa, vm_prot_t prot)
{

	return (pa < ctob(physmem)) ? 0 : EFAULT;
}

#ifdef __HAVE_CPU_UAREA_ALLOC_IDLELWP
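/*
 * cpu_uarea_alloc_idlelwp:
 *
 *	Carve the idle lwp's uarea for <ci> out of the statically
 *	allocated idlestack region, one USPACE slot per CPU.
 */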
vaddr_t
cpu_uarea_alloc_idlelwp(struct cpu_info *ci)
{
	const vaddr_t va = idlestack.pv_va + ci->ci_cpuid * USPACE;
//	printf("%s: %s: va=%lx\n", __func__, ci->ci_data.cpu_name, va);
	return va;
}
#endif

#ifdef MULTIPROCESSOR
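/*
 * cpu_boot_secondary_processors:
 *
 *	Release the secondary CPUs spinning in locore: publish the set
 *	of attached CPUs in the mailbox word and (on ARMv7) send an
 *	event so sleeping cores wake up and check it.
 */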
void
cpu_boot_secondary_processors(void)
{
	uint32_t mbox;
	kcpuset_export_u32(kcpuset_attached, &mbox, sizeof(mbox));
	atomic_swap_32(&arm_cpu_mbox, mbox);
	membar_producer();
#ifdef _ARM_ARCH_7
	__asm __volatile("sev; sev; sev");
#endif
}

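/*
 * xc_send_ipi:
 *
 *	Send a cross-call IPI to <ci>, or, if <ci> is NULL, broadcast
 *	it to every running CPU except the current one.
 */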
void
xc_send_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	if (ci) {
		/* Unicast, remote CPU */
		printf("%s: -> %s", __func__, ci->ci_data.cpu_name);
		intr_ipi_send(ci->ci_kcpuset, IPI_XCALL);
	} else {
		/* Broadcast to all but ourselves */
		printf("%s: -> !%s", __func__, curcpu()->ci_data.cpu_name);
		kcpuset_t *kcp;
		kcpuset_create(&kcp, true);
		KASSERT(kcp != NULL);
		kcpuset_copy(kcp, kcpuset_running);
		kcpuset_clear(kcp, cpu_index(curcpu()));
		intr_ipi_send(kcp, IPI_XCALL);
		kcpuset_destroy(kcp);
	}
	printf("\n");
}
#endif /* MULTIPROCESSOR */

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
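/*
 * mm_md_direct_mapped_phys:
 *
 *	If <pa> lies in the kernel's direct map, return its KVA via
 *	<vap>; otherwise the caller must map it explicitly.
 */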
bool
mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
{
	if (physical_start <= pa && pa < physical_end) {
		*vap = KERNEL_BASE + (pa - physical_start);
		return true;
	}

	return false;
}
#endif