/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#define __INTR_PRIVATE
#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: octeon_cpunode.c,v 1.17 2020/07/22 15:01:18 jmcneill Exp $");

#include "locators.h"
#include "cpunode.h"
#include "opt_multiprocessor.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/lwp.h>
#include <sys/reboot.h>
#include <sys/wdog.h>

#include <uvm/uvm.h>

#include <dev/sysmon/sysmonvar.h>

#include <mips/cache.h>
#include <mips/mips_opcode.h>
#include <mips/mips3_clock.h>
#include <mips/mips3_pte.h>

#include <mips/cavium/octeonvar.h>
#include <mips/cavium/dev/octeon_ciureg.h>
#include <mips/cavium/dev/octeon_corereg.h>

extern struct cpu_softc octeon_cpu_softc[];

struct cpunode_attach_args {
	const char *cnaa_name;
	int cnaa_cpunum;
};

struct cpunode_softc {
	device_t sc_dev;
	device_t sc_wdog_dev;
};

static int cpunode_mainbus_match(device_t, cfdata_t, void *);
static void cpunode_mainbus_attach(device_t, device_t, void *);

static int cpu_cpunode_match(device_t, cfdata_t, void *);
static void cpu_cpunode_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(cpunode, sizeof(struct cpunode_softc),
    cpunode_mainbus_match, cpunode_mainbus_attach, NULL, NULL);

CFATTACH_DECL_NEW(cpu_cpunode, 0,
    cpu_cpunode_match, cpu_cpunode_attach, NULL, NULL);

kcpuset_t *cpus_booted;

static void wdog_cpunode_poke(void *arg);

static int
cpunode_mainbus_print(void *aux, const char *pnp)
{
	struct cpunode_attach_args * const cnaa = aux;

	if (pnp)
		aprint_normal("%s", pnp);

	if (cnaa->cnaa_cpunum != CPUNODECF_CORE_DEFAULT)
		aprint_normal(" core %d", cnaa->cnaa_cpunum);

	return UNCONF;
}

int
cpunode_mainbus_match(device_t parent, cfdata_t cf, void *aux)
{

	return 1;
}

void
cpunode_mainbus_attach(device_t parent, device_t self, void *aux)
{
	struct cpunode_softc * const sc = device_private(self);
	const uint64_t fuse = octeon_xkphys_read_8(CIU_FUSE);
	int cpunum = 0;

	sc->sc_dev = self;

	aprint_naive(": %u core%s\n", popcount64(fuse), fuse == 1 ? "" : "s");
	aprint_normal(": %u core%s", popcount64(fuse), fuse == 1 ? "" : "s");

	const uint64_t cvmctl = mips_cp0_cvmctl_read();
	aprint_normal(", %scrypto", (cvmctl & CP0_CVMCTL_NOCRYPTO) ? "no " : "");
	aprint_normal((cvmctl & CP0_CVMCTL_KASUMI) ? "+kasumi" : "");
	aprint_normal(", %s64bit-mul", (cvmctl & CP0_CVMCTL_NOMUL) ? "no " : "");
	if (cvmctl & CP0_CVMCTL_REPUN)
		aprint_normal(", unaligned-access ok");
#ifdef MULTIPROCESSOR
	uint32_t booted[1];
	kcpuset_export_u32(cpus_booted, booted, sizeof(booted));
	aprint_normal(", booted %#" PRIx32, booted[0]);
#endif
	aprint_normal("\n");

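	/*
	 * CIU_FUSE has one bit set for each core present on this chip;
	 * attach a cpu device for each of them.
	 */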
	for (uint64_t f = fuse; f != 0; f >>= 1, cpunum++) {
		struct cpunode_attach_args cnaa = {
			.cnaa_name = "cpu",
			.cnaa_cpunum = cpunum,
		};
		config_found(self, &cnaa, cpunode_mainbus_print);
	}
#if NWDOG > 0
	struct cpunode_attach_args cnaa = {
		.cnaa_name = "wdog",
		.cnaa_cpunum = CPUNODECF_CORE_DEFAULT,
	};
	config_found(self, &cnaa, cpunode_mainbus_print);
#endif
}

int
cpu_cpunode_match(device_t parent, cfdata_t cf, void *aux)
{
	struct cpunode_attach_args * const cnaa = aux;
	const int cpunum = cf->cf_loc[CPUNODECF_CORE];

	return strcmp(cnaa->cnaa_name, cf->cf_name) == 0
	    && (cpunum == CPUNODECF_CORE_DEFAULT || cpunum == cnaa->cnaa_cpunum);
}

#if defined(MULTIPROCESSOR)
static bool
octeon_fixup_cpu_info_references(int32_t load_addr, uint32_t new_insns[2],
    void *arg)
{
	struct cpu_info * const ci = arg;

	atomic_or_ulong(&curcpu()->ci_flags, CPUF_PRESENT);

	KASSERT(MIPS_KSEG0_P(load_addr));
#ifdef MULTIPROCESSOR
	KASSERT(!CPU_IS_PRIMARY(curcpu()));
#endif
	load_addr += (intptr_t)ci - (intptr_t)&cpu_info_store;

	KASSERT((intptr_t)ci <= load_addr);
	KASSERT(load_addr < (intptr_t)(ci + 1));

	KASSERT(INSN_LUI_P(new_insns[0]));
	KASSERT(INSN_LOAD_P(new_insns[1]) || INSN_STORE_P(new_insns[1]));

	/*
	 * Use the lui and load/store instruction as a prototype and
	 * make it refer to cpu1_info_store instead of cpu_info_store.
	 */
	new_insns[0] &= __BITS(31,16);
	new_insns[1] &= __BITS(31,16);
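	/*
	 * The load/store immediate is sign-extended, so add 0x8000 before
	 * taking the upper half (the usual %hi/%lo split).  For example, a
	 * load_addr of 0x80019000 becomes lui 0x8002 with offset -0x7000,
	 * which sum back to 0x80019000.
	 */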
	new_insns[0] |= (uint16_t)((load_addr + 0x8000) >> 16);
	new_insns[1] |= (uint16_t)load_addr;
#ifdef DEBUG_VERBOSE
	printf("%s: %08x: insn#1 %08x: lui r%u, %d\n",
	    __func__, load_addr, new_insns[0],
	    (new_insns[0] >> 16) & 31,
	    (int16_t)new_insns[0]);
	printf("%s: %08x: insn#2 %08x: %c%c r%u, %d(r%u)\n",
	    __func__, load_addr, new_insns[1],
	    INSN_LOAD_P(new_insns[1]) ? 'l' : 's',
	    INSN_LW_P(new_insns[1]) ? 'w' : 'd',
	    (new_insns[1] >> 16) & 31,
	    (int16_t)new_insns[1],
	    (new_insns[1] >> 21) & 31);
#endif
	return true;
}

static void
octeon_cpu_init(struct cpu_info *ci)
{
	extern const mips_locore_jumpvec_t mips64r2_locore_vec;
	bool ok __diagused;

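	/*
	 * Discard whatever TLB state the bootloader left on this core:
	 * program the page mask, drop the wired count so every entry can
	 * be invalidated, then restore the wired count the pmap expects.
	 */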
	mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_TO_MASK(PAGE_SIZE));
	mips3_cp0_wired_write(0);
	(*mips64r2_locore_vec.ljv_tlb_invalidate_all)();
	mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);

	// First, set up the exception vectors for this cpu.
	mips64r2_vector_init(&mips_splsw);

	// Next rewrite those exceptions to use this cpu's cpu_info.
	ok = mips_fixup_exceptions(octeon_fixup_cpu_info_references, ci);
	KASSERT(ok);

	(void) splhigh();	// make sure interrupts are masked

	KASSERT((mipsNN_cp0_ebase_read() & MIPS_EBASE_CPUNUM) == ci->ci_cpuid);
	KASSERT(curcpu() == ci);
	KASSERT(ci->ci_cpl == IPL_HIGH);
	KASSERT((mips_cp0_status_read() & MIPS_INT_MASK) == 0);
}

static void
octeon_cpu_run(struct cpu_info *ci)
{

	octeon_intr_init(ci);

	mips3_initclocks();
	KASSERTMSG(ci->ci_cpl == IPL_NONE, "cpl %d", ci->ci_cpl);
	KASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);

	aprint_normal("%s: ", device_xname(ci->ci_dev));
	cpu_identify(ci->ci_dev);
}
#endif /* MULTIPROCESSOR */

static void
cpu_cpunode_attach_common(device_t self, struct cpu_info *ci)
{
	struct cpu_softc * const cpu __diagused = ci->ci_softc;

	ci->ci_dev = self;
	self->dv_private = ci;

	KASSERTMSG(cpu != NULL, "ci %p index %d", ci, cpu_index(ci));

#if NWDOG > 0 || defined(DDB)
	/* XXXXXX __mips_n32 and MIPS_PHYS_TO_XKPHYS_CACHED needed here?????? */
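	/*
	 * Hook this core's NMI handling (used by the wdog and by DDB):
	 * each core has a 32-byte vector slot starting at physical 0x800,
	 * and we point ours at octeon_reset_vector.
	 */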
	void **nmi_vector = (void *)MIPS_PHYS_TO_KSEG0(0x800 + 32*ci->ci_cpuid);
	*nmi_vector = octeon_reset_vector;

	struct vm_page * const pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_ZERO);
	KASSERT(pg != NULL);
	const vaddr_t kva = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	KASSERT(kva != 0);
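	/*
	 * Give the NMI handler a private stack: it starts just below a
	 * struct kernframe reserved at the top of the freshly zeroed page.
	 */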
	ci->ci_nmi_stack = (void *)(kva + PAGE_SIZE - sizeof(struct kernframe));
#endif

#if NWDOG > 0
	cpu->cpu_wdog_sih = softint_establish(SOFTINT_CLOCK|SOFTINT_MPSAFE,
	    wdog_cpunode_poke, cpu);
	KASSERT(cpu->cpu_wdog_sih != NULL);
#endif

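	/* Report the core clock rounded to the nearest 0.01 MHz. */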
	aprint_normal(": %lu.%02luMHz\n",
	    (ci->ci_cpu_freq + 5000) / 1000000,
	    ((ci->ci_cpu_freq + 5000) % 1000000) / 10000);
	aprint_debug_dev(self, "hz cycles = %lu, delay divisor = %lu\n",
	    ci->ci_cycles_per_hz, ci->ci_divisor_delay);

	if (CPU_IS_PRIMARY(ci)) {
		aprint_normal("%s: ", device_xname(self));
		cpu_identify(self);
	}
	cpu_attach_common(self, ci);
#ifdef MULTIPROCESSOR
	KASSERT(cpuid_infos[ci->ci_cpuid] == ci);
#endif
}

void
cpu_cpunode_attach(device_t parent, device_t self, void *aux)
{
	struct cpunode_attach_args * const cnaa = aux;
	const int cpunum = cnaa->cnaa_cpunum;

	if (cpunum == 0) {
		cpu_cpunode_attach_common(self, curcpu());
#ifdef MULTIPROCESSOR
		mips_locoresw.lsw_cpu_init = octeon_cpu_init;
		mips_locoresw.lsw_cpu_run = octeon_cpu_run;
#endif
		return;
	}
#ifdef MULTIPROCESSOR
	if ((boothowto & RB_MD1) != 0) {
		aprint_naive("\n");
		aprint_normal(": multiprocessor boot disabled\n");
		return;
	}

	if (!kcpuset_isset(cpus_booted, cpunum)) {
		aprint_naive(" disabled\n");
		aprint_normal(" disabled (unresponsive)\n");
		return;
	}
	struct cpu_info * const ci = cpu_info_alloc(NULL, cpunum, 0, cpunum, 0);

	ci->ci_softc = &octeon_cpu_softc[cpunum];
	ci->ci_softc->cpu_ci = ci;

	cpu_cpunode_attach_common(self, ci);

	KASSERT(ci->ci_data.cpu_idlelwp != NULL);
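	/* Wait up to ~1 second (100 * 10ms) for the new cpu to hatch. */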
	for (int i = 0; i < 100 && !kcpuset_isset(cpus_hatched, cpunum); i++) {
		delay(10000);
	}
	if (!kcpuset_isset(cpus_hatched, cpunum)) {
#ifdef DDB
		aprint_verbose_dev(self, "hatch failed ci=%p flags=%#lx\n", ci, ci->ci_flags);
		cpu_Debugger();
#endif
		panic("%s failed to hatch: ci=%p flags=%#lx",
		    cpu_name(ci), ci, ci->ci_flags);
	}
#else
	aprint_naive(": disabled\n");
	aprint_normal(": disabled (uniprocessor kernel)\n");
#endif
}

#if NWDOG > 0
struct wdog_softc {
	struct sysmon_wdog sc_smw;
	device_t sc_dev;
	u_int sc_wdog_period;
	bool sc_wdog_armed;
};

#ifndef OCTEON_WDOG_PERIOD_DEFAULT
#define OCTEON_WDOG_PERIOD_DEFAULT	4
#endif

static int wdog_cpunode_match(device_t, cfdata_t, void *);
static void wdog_cpunode_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(wdog_cpunode, sizeof(struct wdog_softc),
    wdog_cpunode_match, wdog_cpunode_attach, NULL, NULL);

static int
wdog_cpunode_setmode(struct sysmon_wdog *smw)
{
	struct wdog_softc * const sc = smw->smw_cookie;

	if ((smw->smw_mode & WDOG_MODE_MASK) == WDOG_MODE_DISARMED) {
		if (sc->sc_wdog_armed) {
			CPU_INFO_ITERATOR cii;
			struct cpu_info *ci;
			for (CPU_INFO_FOREACH(cii, ci)) {
				struct cpu_softc * const cpu = ci->ci_softc;
				uint64_t wdog = mips3_ld(cpu->cpu_wdog);
				wdog &= ~CIU_WDOGX_MODE;
				mips3_sd(cpu->cpu_pp_poke, wdog);
				aprint_verbose_dev(sc->sc_dev,
				    "%s: disable wdog=%#"PRIx64"\n",
				    cpu_name(ci), wdog);
				mips3_sd(cpu->cpu_wdog, wdog);
				mips3_sd(cpu->cpu_pp_poke, wdog);
			}
			sc->sc_wdog_armed = false;
		}
	} else if (!sc->sc_wdog_armed) {
		kpreempt_disable();
		struct cpu_info *ci = curcpu();
		if (smw->smw_period == WDOG_PERIOD_DEFAULT) {
			smw->smw_period = OCTEON_WDOG_PERIOD_DEFAULT;
		}
		uint64_t wdog_len = smw->smw_period * ci->ci_cpu_freq;
		//
		// The wdog is a 24-bit counter that decrements every 256
		// cycles, so in CPU cycles it is effectively a 32-bit
		// counter.  As long as wdog_len doesn't overflow a 32-bit
		// value, we are fine; we write the upper 16 bits of that
		// 32-bit cycle count.
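		// For example, a 1 GHz core with the default 4 second period
		// gives wdog_len = 4,000,000,000 cycles (fits in 32 bits),
		// and 4000000000 >> 16 == 61035 is what ends up in
		// CIU_WDOGX_LEN.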
		if ((wdog_len >> 32) != 0) {
			kpreempt_enable();
			return EINVAL;
		}
		sc->sc_wdog_period = smw->smw_period;
		CPU_INFO_ITERATOR cii;
		for (CPU_INFO_FOREACH(cii, ci)) {
			struct cpu_softc * const cpu = ci->ci_softc;
			uint64_t wdog = mips3_ld(cpu->cpu_wdog);
			wdog &= ~(CIU_WDOGX_MODE|CIU_WDOGX_LEN);
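			// Mode 3 selects the most aggressive response
			// (interrupt, then NMI, then soft reset) rather
			// than interrupt-only.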
			wdog |= __SHIFTIN(3, CIU_WDOGX_MODE);
			wdog |= __SHIFTIN(wdog_len >> 16, CIU_WDOGX_LEN);
			aprint_verbose_dev(sc->sc_dev,
			    "%s: enable wdog=%#"PRIx64" (%#"PRIx64")\n",
			    cpu_name(ci), wdog, wdog_len);
			mips3_sd(cpu->cpu_wdog, wdog);
		}
		sc->sc_wdog_armed = true;
		kpreempt_enable();
	}
	return 0;
}

static void
wdog_cpunode_poke(void *arg)
{
	struct cpu_softc *cpu = arg;

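	/* Any write to this core's CIU_PP_POKE register reloads its wdog. */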
	mips3_sd(cpu->cpu_pp_poke, 0);
}

static int
wdog_cpunode_tickle(struct sysmon_wdog *smw)
{

	wdog_cpunode_poke(curcpu()->ci_softc);
#ifdef MULTIPROCESSOR
	// We need to send IPIs to the other CPUs to poke their wdog.
	cpu_send_ipi(NULL, IPI_WDOG);
#endif
	return 0;
}

int
wdog_cpunode_match(device_t parent, cfdata_t cf, void *aux)
{
	struct cpunode_softc * const sc = device_private(parent);
	struct cpunode_attach_args * const cnaa = aux;
	const int cpunum = cf->cf_loc[CPUNODECF_CORE];

	return sc->sc_wdog_dev == NULL
	    && strcmp(cnaa->cnaa_name, cf->cf_name) == 0
	    && cpunum == CPUNODECF_CORE_DEFAULT;
}

void
wdog_cpunode_attach(device_t parent, device_t self, void *aux)
{
	struct cpunode_softc * const psc = device_private(parent);
	struct wdog_softc * const sc = device_private(self);
	cfdata_t const cf = device_cfdata(self);

	psc->sc_wdog_dev = self;

	sc->sc_dev = self;
	sc->sc_smw.smw_name = device_xname(self);
	sc->sc_smw.smw_cookie = sc;
	sc->sc_smw.smw_setmode = wdog_cpunode_setmode;
	sc->sc_smw.smw_tickle = wdog_cpunode_tickle;
	sc->sc_smw.smw_period = OCTEON_WDOG_PERIOD_DEFAULT;
	sc->sc_wdog_period = sc->sc_smw.smw_period;

	/*
	 * We need one softint per cpu, used to poke the wdog on the
	 * other CPUs.
	 */
#if 0	/* XXX unused? */
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	for (CPU_INFO_FOREACH(cii, ci)) {
	}
#endif

	aprint_normal(": default period is %u second%s\n",
	    sc->sc_wdog_period, sc->sc_wdog_period == 1 ? "" : "s");

	if (sysmon_wdog_register(&sc->sc_smw) != 0) {
		aprint_error_dev(self, "unable to register with sysmon\n");
		return;
	}

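	/*
	 * Flags bit 0 from the kernel config ("flags 1" on the wdog line)
	 * arms the wdog with a kernel-mode tickler at attach time.
	 */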
	if (cf->cf_flags & 1) {
		int error = sysmon_wdog_setmode(&sc->sc_smw, WDOG_MODE_KTICKLE,
		    sc->sc_wdog_period);
		if (error)
			aprint_error_dev(self,
			    "failed to start kernel tickler: %d\n", error);
	}
}
#endif /* NWDOG > 0 */