/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#define __INTR_PRIVATE
#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD$");

#include "locators.h"
#include "cpunode.h"
#include "opt_multiprocessor.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/device.h>
#include <sys/lwp.h>
#include <sys/cpu.h>
#include <sys/atomic.h>
#include <sys/wdog.h>

#include <uvm/uvm.h>

#include <dev/sysmon/sysmonvar.h>

#include <mips/cache.h>
#include <mips/mips_opcode.h>
#include <mips/mips3_clock.h>

#include <mips/cavium/octeonvar.h>
#include <mips/cavium/dev/octeon_ciureg.h>
#include <mips/cavium/dev/octeon_corereg.h>

struct cpunode_attach_args {
	const char *cnaa_name;
	int cnaa_cpunum;
};

struct cpunode_softc {
	device_t sc_dev;
	device_t sc_wdog_dev;
	uint64_t sc_fuse;
};

static int cpunode_mainbus_match(device_t, cfdata_t, void *);
static void cpunode_mainbus_attach(device_t, device_t, void *);

static int cpu_cpunode_match(device_t, cfdata_t, void *);
static void cpu_cpunode_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(cpunode, sizeof(struct cpunode_softc),
    cpunode_mainbus_match, cpunode_mainbus_attach, NULL, NULL);

CFATTACH_DECL_NEW(cpu_cpunode, 0,
    cpu_cpunode_match, cpu_cpunode_attach, NULL, NULL);

kcpuset_t *cpus_booted;

void octeon_reset_vector(void);

static void wdog_cpunode_poke(void *arg);

static int
cpunode_mainbus_print(void *aux, const char *pnp)
{
	struct cpunode_attach_args * const cnaa = aux;

	if (pnp)
		aprint_normal("%s", pnp);

	if (cnaa->cnaa_cpunum != CPUNODECF_CORE_DEFAULT)
		aprint_normal(" core %d", cnaa->cnaa_cpunum);

	return UNCONF;
}

int
cpunode_mainbus_match(device_t parent, cfdata_t cf, void *aux)
{

	return 1;
}

void
cpunode_mainbus_attach(device_t parent, device_t self, void *aux)
{
	struct cpunode_softc * const sc = device_private(self);
	int cpunum = 0;

	sc->sc_dev = self;
	sc->sc_fuse = octeon_xkphys_read_8(CIU_FUSE);

	aprint_naive(": %u core%s\n",
	    popcount32((uint32_t)sc->sc_fuse),
	    sc->sc_fuse == 1 ? "" : "s");

	aprint_normal(": %u core%s",
	    popcount32((uint32_t)sc->sc_fuse),
	    sc->sc_fuse == 1 ? "" : "s");
	const uint64_t cvmctl = mips_cp0_cvmctl_read();
	aprint_normal(", %scrypto", (cvmctl & CP0_CVMCTL_NOCRYPTO) ? "no " : "");
	aprint_normal((cvmctl & CP0_CVMCTL_KASUMI) ? "+kasumi" : "");
	aprint_normal(", %s64bit-mul", (cvmctl & CP0_CVMCTL_NOMUL) ? "no " : "");
	if (cvmctl & CP0_CVMCTL_REPUN)
		aprint_normal(", unaligned-access ok");
#ifdef MULTIPROCESSOR
	uint32_t booted[1];
	kcpuset_export_u32(cpus_booted, booted, sizeof(booted));
	aprint_normal(", booted %#" PRIx32, booted[0]);
#endif
	aprint_normal("\n");

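	/*
	 * Each set bit in the CIU fuse value read above corresponds to an
	 * available core; walk the mask and attach a cpu device for every
	 * core that is present.
	 */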
	for (uint64_t fuse = sc->sc_fuse; fuse != 0; fuse >>= 1, cpunum++) {
		struct cpunode_attach_args cnaa = {
			.cnaa_name = "cpu",
			.cnaa_cpunum = cpunum,
		};
		config_found(self, &cnaa, cpunode_mainbus_print);
	}
#if NWDOG > 0
	struct cpunode_attach_args cnaa = {
		.cnaa_name = "wdog",
		.cnaa_cpunum = CPUNODECF_CORE_DEFAULT,
	};
	config_found(self, &cnaa, cpunode_mainbus_print);
#endif
}

int
cpu_cpunode_match(device_t parent, cfdata_t cf, void *aux)
{
	struct cpunode_attach_args * const cnaa = aux;
	const int cpunum = cf->cf_loc[CPUNODECF_CORE];

	return strcmp(cnaa->cnaa_name, cf->cf_name) == 0
	    && (cpunum == CPUNODECF_CORE_DEFAULT || cpunum == cnaa->cnaa_cpunum);
}

#if defined(MULTIPROCESSOR)
static bool
octeon_fixup_cpu_info_references(int32_t load_addr, uint32_t new_insns[2],
    void *arg)
{
	struct cpu_info * const ci = arg;

	atomic_or_ulong(&curcpu()->ci_flags, CPUF_PRESENT);

	KASSERT(MIPS_KSEG0_P(load_addr));
#ifdef MULTIPROCESSOR
	KASSERT(!CPU_IS_PRIMARY(curcpu()));
#endif
	load_addr += (intptr_t)ci - (intptr_t)&cpu_info_store;

	KASSERT((intptr_t)ci <= load_addr);
	KASSERT(load_addr < (intptr_t)(ci + 1));

	KASSERT(INSN_LUI_P(new_insns[0]));
	KASSERT(INSN_LOAD_P(new_insns[1]) || INSN_STORE_P(new_insns[1]));

	/*
	 * Use the lui and load/store instruction as a prototype and
	 * make it refer to cpu1_info_store instead of cpu_info_store.
	 */
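	/*
	 * Illustrative example (hypothetical address): if the relocated
	 * load_addr is 0x80789abc, the low half 0x9abc sign-extends to a
	 * negative displacement, so the lui immediate becomes
	 * (0x80789abc + 0x8000) >> 16 = 0x8079 and the load/store
	 * displacement becomes 0x9abc; 0x80790000 + (int16_t)0x9abc then
	 * yields 0x80789abc again.  The "+ 0x8000" below accounts for
	 * that sign extension.
	 */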
	new_insns[0] &= __BITS(31,16);
	new_insns[1] &= __BITS(31,16);
	new_insns[0] |= (uint16_t)((load_addr + 0x8000) >> 16);
	new_insns[1] |= (uint16_t)load_addr;
#ifdef DEBUG_VERBOSE
	printf("%s: %08x: insn#1 %08x: lui r%u, %d\n",
	    __func__, load_addr, new_insns[0],
	    (new_insns[0] >> 16) & 31,
	    (int16_t)new_insns[0]);
	printf("%s: %08x: insn#2 %08x: %c%c r%u, %d(r%u)\n",
	    __func__, load_addr, new_insns[1],
	    INSN_LOAD_P(new_insns[1]) ? 'l' : 's',
	    INSN_LW_P(new_insns[1]) ? 'w' : 'd',
	    (new_insns[1] >> 16) & 31,
	    (int16_t)new_insns[1],
	    (new_insns[1] >> 21) & 31);
#endif
	return true;
}

static void
octeon_cpu_init(struct cpu_info *ci)
{
	bool ok __diagused;

	// First, set up the exception vectors for this cpu.
	mips64r2_vector_init(&mips_splsw);

	// Next rewrite those exceptions to use this cpu's cpu_info.
	ok = mips_fixup_exceptions(octeon_fixup_cpu_info_references, ci);
	KASSERT(ok);

	(void) splhigh();		// make sure interrupts are masked

	KASSERT((mipsNN_cp0_ebase_read() & MIPS_EBASE_CPUNUM) == ci->ci_cpuid);
	KASSERT(curcpu() == ci);
	KASSERT(ci->ci_cpl == IPL_HIGH);
	KASSERT((mips_cp0_status_read() & MIPS_INT_MASK) == 0);
}

static void
octeon_cpu_run(struct cpu_info *ci)
{
	octeon_intr_init(ci);

	mips3_initclocks();
	KASSERTMSG(ci->ci_cpl == IPL_NONE, "cpl %d", ci->ci_cpl);
	KASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);

	aprint_normal("%s: ", device_xname(ci->ci_dev));
	cpu_identify(ci->ci_dev);
}
#endif /* MULTIPROCESSOR */

static void
cpu_cpunode_attach_common(device_t self, struct cpu_info *ci)
{
	struct cpu_softc * const cpu __diagused = ci->ci_softc;

	ci->ci_dev = self;
	self->dv_private = ci;

	KASSERTMSG(cpu != NULL, "ci %p index %d", ci, cpu_index(ci));

#if NWDOG > 0 || defined(DDB)
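	/*
	 * The slot written below appears to be this core's entry in the
	 * low-memory NMI/reset vector table (one 32-byte slot per core
	 * starting at physical 0x800).  Pointing it at octeon_reset_vector
	 * and giving the core a dedicated stack page lets an NMI (from the
	 * wdog or DDB) be taken on a known-good stack.
	 */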
	void **nmi_vector = (void *)MIPS_PHYS_TO_KSEG0(0x800 + 32*ci->ci_cpuid);
	*nmi_vector = octeon_reset_vector;

	struct vm_page * const pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_ZERO);
	KASSERT(pg != NULL);
	const vaddr_t kva = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	KASSERT(kva != 0);
	ci->ci_nmi_stack = (void *)(kva + PAGE_SIZE - sizeof(struct kernframe));
#endif

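	/*
	 * Each core gets its own softint for poking its wdog; the tickle
	 * path appears to raise it on the other cores via IPI_WDOG so
	 * every core's timer is refreshed.
	 */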
#if NWDOG > 0
	cpu->cpu_wdog_sih = softint_establish(SOFTINT_CLOCK|SOFTINT_MPSAFE,
	    wdog_cpunode_poke, cpu);
	KASSERT(cpu->cpu_wdog_sih != NULL);
#endif

	aprint_normal(": %lu.%02luMHz (hz cycles = %lu, delay divisor = %lu)\n",
	    ci->ci_cpu_freq / 1000000,
	    (ci->ci_cpu_freq % 1000000) / 10000,
	    ci->ci_cycles_per_hz, ci->ci_divisor_delay);

	if (CPU_IS_PRIMARY(ci)) {
		aprint_normal("%s: ", device_xname(self));
		cpu_identify(self);
	}
	cpu_attach_common(self, ci);
#ifdef MULTIPROCESSOR
	KASSERT(cpuid_infos[ci->ci_cpuid] == ci);
#endif
}

void
cpu_cpunode_attach(device_t parent, device_t self, void *aux)
{
	struct cpunode_attach_args * const cnaa = aux;
	const int cpunum = cnaa->cnaa_cpunum;

	if (cpunum == 0) {
		cpu_cpunode_attach_common(self, curcpu());
#ifdef MULTIPROCESSOR
		mips_locoresw.lsw_cpu_init = octeon_cpu_init;
		mips_locoresw.lsw_cpu_run = octeon_cpu_run;
#endif
		return;
	}
#ifdef MULTIPROCESSOR
	KASSERTMSG(cpunum == 1, "cpunum %d", cpunum);
	if (!kcpuset_isset(cpus_booted, cpunum)) {
		aprint_naive(" disabled\n");
		aprint_normal(" disabled (unresponsive)\n");
		return;
	}
	struct cpu_info * const ci = cpu_info_alloc(NULL, cpunum, 0, cpunum, 0);

	ci->ci_softc = &octeon_cpu1_softc;
	ci->ci_softc->cpu_ci = ci;

	cpu_cpunode_attach_common(self, ci);

	KASSERT(ci->ci_data.cpu_idlelwp != NULL);
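	/*
	 * Give the secondary core up to ~1 second (100 polls of 10ms each)
	 * to mark itself hatched before deciding it is stuck.
	 */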
	for (int i = 0; i < 100 && !kcpuset_isset(cpus_hatched, cpunum); i++) {
		delay(10000);
	}
	if (!kcpuset_isset(cpus_hatched, cpunum)) {
#ifdef DDB
		aprint_verbose_dev(self, "hatch failed ci=%p flags=%#lx\n",
		    ci, ci->ci_flags);
		cpu_Debugger();
#endif
		panic("%s failed to hatch: ci=%p flags=%#lx",
		    cpu_name(ci), ci, ci->ci_flags);
	}
#else
	aprint_naive(": disabled\n");
	aprint_normal(": disabled (uniprocessor kernel)\n");
#endif
}

#if NWDOG > 0
struct wdog_softc {
	struct sysmon_wdog sc_smw;
	device_t sc_dev;
	u_int sc_wdog_period;
	bool sc_wdog_armed;
};

#ifndef OCTEON_WDOG_PERIOD_DEFAULT
#define OCTEON_WDOG_PERIOD_DEFAULT	4
#endif

static int wdog_cpunode_match(device_t, cfdata_t, void *);
static void wdog_cpunode_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(wdog_cpunode, sizeof(struct wdog_softc),
    wdog_cpunode_match, wdog_cpunode_attach, NULL, NULL);

static int
wdog_cpunode_setmode(struct sysmon_wdog *smw)
{
	struct wdog_softc * const sc = smw->smw_cookie;

	if ((smw->smw_mode & WDOG_MODE_MASK) == WDOG_MODE_DISARMED) {
		if (sc->sc_wdog_armed) {
			CPU_INFO_ITERATOR cii;
			struct cpu_info *ci;
			for (CPU_INFO_FOREACH(cii, ci)) {
				struct cpu_softc * const cpu = ci->ci_softc;
				uint64_t wdog = mips3_ld(cpu->cpu_wdog);
				wdog &= ~CIU_WDOGX_MODE;
				mips3_sd(cpu->cpu_pp_poke, wdog);
				aprint_verbose_dev(sc->sc_dev,
				    "%s: disable wdog=%#"PRIx64"\n",
				    cpu_name(ci), wdog);
				mips3_sd(cpu->cpu_wdog, wdog);
				mips3_sd(cpu->cpu_pp_poke, wdog);
			}
			sc->sc_wdog_armed = false;
		}
	} else if (!sc->sc_wdog_armed) {
		kpreempt_disable();
		struct cpu_info *ci = curcpu();
		if (smw->smw_period == WDOG_PERIOD_DEFAULT) {
			smw->smw_period = OCTEON_WDOG_PERIOD_DEFAULT;
		}
		uint64_t wdog_len = smw->smw_period * ci->ci_cpu_freq;
		//
		// The wdog is a 24-bit counter that decrements every 256
		// cycles, so it effectively counts a 32-bit number of CPU
		// cycles.  As long as wdog_len fits in 32 bits we are fine;
		// we write the upper 16 bits of that 32-bit period into the
		// LEN field below.
		//
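		//
		// Worked example with a hypothetical 700MHz core and the
		// default 4 second period: wdog_len = 4 * 700000000 =
		// 2800000000 cycles, which fits in 32 bits, and the value
		// programmed into LEN is 2800000000 >> 16 = 42724.
		//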
		if ((wdog_len >> 32) != 0) {
			kpreempt_enable();
			return EINVAL;
		}
		sc->sc_wdog_period = smw->smw_period;
		CPU_INFO_ITERATOR cii;
		for (CPU_INFO_FOREACH(cii, ci)) {
			struct cpu_softc * const cpu = ci->ci_softc;
			uint64_t wdog = mips3_ld(cpu->cpu_wdog);
			wdog &= ~(CIU_WDOGX_MODE|CIU_WDOGX_LEN);
			wdog |= __SHIFTIN(3, CIU_WDOGX_MODE);
			wdog |= __SHIFTIN(wdog_len >> 16, CIU_WDOGX_LEN);
			aprint_verbose_dev(sc->sc_dev,
			    "%s: enable wdog=%#"PRIx64" (%#"PRIx64")\n",
			    cpu_name(ci), wdog, wdog_len);
			mips3_sd(cpu->cpu_wdog, wdog);
		}
		sc->sc_wdog_armed = true;
		kpreempt_enable();
	}
	return 0;
}

static void
wdog_cpunode_poke(void *arg)
{
	struct cpu_softc *cpu = arg;
	mips3_sd(cpu->cpu_pp_poke, 0);
}

static int
wdog_cpunode_tickle(struct sysmon_wdog *smw)
{
	wdog_cpunode_poke(curcpu()->ci_softc);
#ifdef MULTIPROCESSOR
	// We need to send IPIs to the other CPUs to poke their wdog.
	cpu_send_ipi(NULL, IPI_WDOG);
#endif
	return 0;
}

int
wdog_cpunode_match(device_t parent, cfdata_t cf, void *aux)
{
	struct cpunode_softc * const sc = device_private(parent);
	struct cpunode_attach_args * const cnaa = aux;
	const int cpunum = cf->cf_loc[CPUNODECF_CORE];

	return sc->sc_wdog_dev == NULL
	    && strcmp(cnaa->cnaa_name, cf->cf_name) == 0
	    && cpunum == CPUNODECF_CORE_DEFAULT;
}

void
wdog_cpunode_attach(device_t parent, device_t self, void *aux)
{
	struct cpunode_softc * const psc = device_private(parent);
	struct wdog_softc * const sc = device_private(self);
	cfdata_t const cf = device_cfdata(self);

	psc->sc_wdog_dev = self;

	sc->sc_dev = self;
	sc->sc_smw.smw_name = device_xname(self);
	sc->sc_smw.smw_cookie = sc;
	sc->sc_smw.smw_setmode = wdog_cpunode_setmode;
	sc->sc_smw.smw_tickle = wdog_cpunode_tickle;
	sc->sc_smw.smw_period = OCTEON_WDOG_PERIOD_DEFAULT;
	sc->sc_wdog_period = sc->sc_smw.smw_period;

	/*
	 * We need one softint per cpu so the tickle routine can poke the
	 * wdog on the other cpus; those softints are established per-cpu
	 * in cpu_cpunode_attach_common(), so there is nothing more to set
	 * up here.
	 */

	aprint_normal(": default period is %u second%s\n",
	    sc->sc_wdog_period, sc->sc_wdog_period == 1 ? "" : "s");

	if (sysmon_wdog_register(&sc->sc_smw) != 0) {
		aprint_error_dev(self, "unable to register with sysmon\n");
		return;
	}

	if (cf->cf_flags & 1) {
		int error = sysmon_wdog_setmode(&sc->sc_smw, WDOG_MODE_KTICKLE,
		    sc->sc_wdog_period);
		if (error)
			aprint_error_dev(self,
			    "failed to start kernel tickler: %d\n", error);
	}
}
#endif /* NWDOG > 0 */