/*	$NetBSD: a9tmr.c,v 1.17 2018/10/14 19:01:00 aymeric Exp $	*/

/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: a9tmr.c,v 1.17 2018/10/14 19:01:00 aymeric Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/timetc.h>
#include <sys/xcall.h>

#include <prop/proplib.h>

#include <arm/cortex/a9tmr_reg.h>
#include <arm/cortex/a9tmr_var.h>

#include <arm/cortex/mpcore_var.h>

static int a9tmr_match(device_t, cfdata_t, void *);
static void a9tmr_attach(device_t, device_t, void *);

static u_int a9tmr_get_timecount(struct timecounter *);

static struct a9tmr_softc a9tmr_sc;

static struct timecounter a9tmr_timecounter = {
	.tc_get_timecount = a9tmr_get_timecount,
	.tc_poll_pps = 0,
	.tc_counter_mask = ~0u,
	.tc_frequency = 0,	/* set by cpu_initclocks() */
	.tc_name = NULL,	/* set by attach */
	.tc_quality = 500,
	.tc_priv = &a9tmr_sc,
	.tc_next = NULL,
};

CFATTACH_DECL_NEW(arma9tmr, 0, a9tmr_match, a9tmr_attach, NULL, NULL);

static inline uint32_t
a9tmr_global_read(struct a9tmr_softc *sc, bus_size_t o)
{
	return bus_space_read_4(sc->sc_memt, sc->sc_global_memh, o);
}

static inline void
a9tmr_global_write(struct a9tmr_softc *sc, bus_size_t o, uint32_t v)
{
	bus_space_write_4(sc->sc_memt, sc->sc_global_memh, o, v);
}


/* ARGSUSED */
static int
a9tmr_match(device_t parent, cfdata_t cf, void *aux)
{
	struct mpcore_attach_args * const mpcaa = aux;

	if (a9tmr_sc.sc_dev != NULL)
		return 0;

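	/*
	 * If ID_PFR1 advertises the ARMv7 generic timer, that timer
	 * supersedes the MPCore global timer, so don't match.
	 */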
	if ((armreg_pfr1_read() & ARM_PFR1_GTIMER_MASK) != 0)
		return 0;

	if (!CPU_ID_CORTEX_A9_P(curcpu()->ci_arm_cpuid) &&
	    !CPU_ID_CORTEX_A5_P(curcpu()->ci_arm_cpuid))
		return 0;

	if (strcmp(mpcaa->mpcaa_name, cf->cf_name) != 0)
		return 0;

	/*
	 * This isn't present on UP A9s (since CBAR isn't present).
	 */
	uint32_t mpidr = armreg_mpidr_read();
	if (mpidr == 0 || (mpidr & MPIDR_U))
		return 0;

	return 1;
}

static void
a9tmr_attach(device_t parent, device_t self, void *aux)
{
	struct a9tmr_softc *sc = &a9tmr_sc;
	struct mpcore_attach_args * const mpcaa = aux;
	prop_dictionary_t dict = device_properties(self);
	char freqbuf[sizeof("XXX SHz")];
	const char *cpu_type;

	/*
	 * This runs at the ARM PERIPHCLOCK.
	 * The MD code should have set up our frequency for us.
	 */
	if (!prop_dictionary_get_uint32(dict, "frequency", &sc->sc_freq)) {
		dict = device_properties(parent);
		prop_dictionary_get_uint32(dict, "frequency", &sc->sc_freq);
	}

	humanize_number(freqbuf, sizeof(freqbuf), sc->sc_freq, "Hz", 1000);

	aprint_naive("\n");
	if (CPU_ID_CORTEX_A5_P(curcpu()->ci_arm_cpuid)) {
		cpu_type = "A5";
	} else {
		cpu_type = "A9";
	}
	aprint_normal(": %s Global 64-bit Timer (%s)\n", cpu_type, freqbuf);

	self->dv_private = sc;
	sc->sc_dev = self;
	sc->sc_memt = mpcaa->mpcaa_memt;
	sc->sc_memh = mpcaa->mpcaa_memh;

	evcnt_attach_dynamic(&sc->sc_ev_missing_ticks, EVCNT_TYPE_MISC, NULL,
	    device_xname(self), "missing interrupts");

	bus_space_subregion(sc->sc_memt, sc->sc_memh,
	    mpcaa->mpcaa_off1, TMR_GLOBAL_SIZE, &sc->sc_global_memh);

	if (mpcaa->mpcaa_irq != -1) {
		sc->sc_global_ih = intr_establish(mpcaa->mpcaa_irq, IPL_CLOCK,
		    IST_EDGE | IST_MPSAFE, a9tmr_intr, NULL);
		if (sc->sc_global_ih == NULL)
			panic("%s: unable to register timer interrupt",
			    __func__);
		aprint_normal_dev(sc->sc_dev, "interrupting on irq %d\n",
		    mpcaa->mpcaa_irq);
	}
}

static inline uint64_t
a9tmr_gettime(struct a9tmr_softc *sc)
{
	uint32_t lo, hi;

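	/*
	 * The 64-bit counter has to be read as two 32-bit halves.
	 * If the upper half changed while we were reading the lower
	 * half, the counter carried between the two reads, so retry
	 * until we see a stable upper half.
	 */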
	do {
		hi = a9tmr_global_read(sc, TMR_GBL_CTR_U);
		lo = a9tmr_global_read(sc, TMR_GBL_CTR_L);
	} while (hi != a9tmr_global_read(sc, TMR_GBL_CTR_U));

	return ((uint64_t)hi << 32) | lo;
}

void
a9tmr_init_cpu_clock(struct cpu_info *ci)
{
	struct a9tmr_softc * const sc = &a9tmr_sc;
	uint64_t now = a9tmr_gettime(sc);

	KASSERT(ci == curcpu());

	ci->ci_lastintr = now;

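	/*
	 * With auto-increment enabled, each comparator match adds
	 * this value to the compare register, re-arming the next
	 * tick without software intervention.
	 */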
	a9tmr_global_write(sc, TMR_GBL_AUTOINC, sc->sc_autoinc);

	/*
	 * To update the compare register we have to disable
	 * comparisons first.
	 */
	uint32_t ctl = a9tmr_global_read(sc, TMR_GBL_CTL);
	if (ctl & TMR_GBL_CTL_CMP_ENABLE) {
		a9tmr_global_write(sc, TMR_GBL_CTL,
		    ctl & ~TMR_GBL_CTL_CMP_ENABLE);
	}

	/*
	 * Schedule the next interrupt.
	 */
	now += sc->sc_autoinc;
	a9tmr_global_write(sc, TMR_GBL_CMP_L, (uint32_t) now);
	a9tmr_global_write(sc, TMR_GBL_CMP_H, (uint32_t) (now >> 32));

	/*
	 * Re-enable the comparator and now enable interrupts.
	 */
	a9tmr_global_write(sc, TMR_GBL_INT, 1);	/* clear interrupt pending */
	ctl |= TMR_GBL_CTL_CMP_ENABLE | TMR_GBL_CTL_INT_ENABLE |
	    TMR_GBL_CTL_AUTO_INC | TMR_CTL_ENABLE;
	a9tmr_global_write(sc, TMR_GBL_CTL, ctl);
#if 0
	printf("%s: %s: ctl %#x autoinc %u cmp %#x%08x now %#"PRIx64"\n",
	    __func__, ci->ci_data.cpu_name,
	    a9tmr_global_read(sc, TMR_GBL_CTL),
	    a9tmr_global_read(sc, TMR_GBL_AUTOINC),
	    a9tmr_global_read(sc, TMR_GBL_CMP_H),
	    a9tmr_global_read(sc, TMR_GBL_CMP_L),
	    a9tmr_gettime(sc));

	int s = splsched();
	uint64_t when = now;
	u_int n = 0;
	while ((now = a9tmr_gettime(sc)) < when) {
		/* spin */
		n++;
		KASSERTMSG(n <= sc->sc_autoinc,
		    "spun %u times but only %"PRIu64" has passed",
		    n, when - now);
	}
	printf("%s: %s: status %#x cmp %#x%08x now %#"PRIx64"\n",
	    __func__, ci->ci_data.cpu_name,
	    a9tmr_global_read(sc, TMR_GBL_INT),
	    a9tmr_global_read(sc, TMR_GBL_CMP_H),
	    a9tmr_global_read(sc, TMR_GBL_CMP_L),
	    a9tmr_gettime(sc));
	splx(s);
#elif 0
	delay(1000000 / hz + 1000);
#endif
}

void
a9tmr_cpu_initclocks(void)
{
	struct a9tmr_softc * const sc = &a9tmr_sc;

	KASSERT(sc->sc_dev != NULL);
	KASSERT(sc->sc_freq != 0);

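	/*
	 * One comparator match every (frequency / hz) counter ticks
	 * gives hz hardclock interrupts per second.
	 */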
	sc->sc_autoinc = sc->sc_freq / hz;

	a9tmr_init_cpu_clock(curcpu());

	a9tmr_timecounter.tc_name = device_xname(sc->sc_dev);
	a9tmr_timecounter.tc_frequency = sc->sc_freq;

	tc_init(&a9tmr_timecounter);
}

static void
a9tmr_update_freq_cb(void *arg1, void *arg2)
{
	a9tmr_init_cpu_clock(curcpu());
}

void
a9tmr_update_freq(uint32_t freq)
{
	struct a9tmr_softc * const sc = &a9tmr_sc;
	uint64_t xc;

	KASSERT(sc->sc_dev != NULL);
	KASSERT(freq != 0);

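	/*
	 * Detach the timecounter while the frequency changes,
	 * reprogram the comparator on every CPU with a broadcast
	 * cross-call, then re-register at the new frequency.
	 */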
	tc_detach(&a9tmr_timecounter);

	sc->sc_freq = freq;
	sc->sc_autoinc = sc->sc_freq / hz;

	xc = xc_broadcast(0, a9tmr_update_freq_cb, NULL, NULL);
	xc_wait(xc);

	a9tmr_timecounter.tc_frequency = sc->sc_freq;
	tc_init(&a9tmr_timecounter);
}

void
a9tmr_delay(unsigned int n)
{
	struct a9tmr_softc * const sc = &a9tmr_sc;

	KASSERT(sc != NULL);

	uint32_t freq = sc->sc_freq ? sc->sc_freq :
	    curcpu()->ci_data.cpu_cc_freq / 2;
	KASSERT(freq != 0);

	/*
	 * Not quite a divide by 1000000, but close enough (about 1.3%
	 * high, which means we wait about 1.3% longer than asked).
	 */
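	/*
	 * (freq >> 20) + (freq >> 24) == freq * 17 / 2^24, and
	 * 2^24 / 17 ~= 986895, so this overestimates freq / 1000000
	 * by a factor of about 1.013.
	 */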
	const uint64_t incr_per_us = (freq >> 20) + (freq >> 24);

	const uint64_t delta = n * incr_per_us;
	const uint64_t base = a9tmr_gettime(sc);
	const uint64_t finish = base + delta;

	while (a9tmr_gettime(sc) < finish) {
		/* spin */
	}
}

/*
 * a9tmr_intr:
 *
 *	Handle the hardclock interrupt.
 */
int
a9tmr_intr(void *arg)
{
	struct clockframe * const cf = arg;
	struct a9tmr_softc * const sc = &a9tmr_sc;
	struct cpu_info * const ci = curcpu();

	const uint64_t now = a9tmr_gettime(sc);
	uint64_t delta = now - ci->ci_lastintr;

	a9tmr_global_write(sc, TMR_GBL_INT, 1);	/* Ack the interrupt */

#if 0
	printf("%s(%p): %s: now %#"PRIx64" delta %"PRIu64"\n",
	    __func__, cf, ci->ci_data.cpu_name, now, delta);
#endif
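	/*
	 * Sanity check: at least 1/64 of the programmed tick period
	 * should have elapsed since the previous interrupt.
	 */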
	KASSERTMSG(delta > sc->sc_autoinc / 64,
	    "%s: interrupting too quickly (delta=%"PRIu64")",
	    ci->ci_data.cpu_name, delta);

	ci->ci_lastintr = now;

	hardclock(cf);

	if (delta > sc->sc_autoinc) {
		u_int ticks = hz;
		for (delta -= sc->sc_autoinc;
		     delta >= sc->sc_autoinc && ticks > 0;
		     delta -= sc->sc_autoinc, ticks--) {
#if 0
			/*
			 * Try to make up for up to a second's worth
			 * of missed clock interrupts.
			 */
			hardclock(cf);
#else
			sc->sc_ev_missing_ticks.ev_count++;
#endif
		}
	}

	return 1;
}

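/*
 * There is no separate statclock rate to adjust on this timer; the
 * empty body just satisfies the MD clock interface.
 */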
void
setstatclockrate(int newhz)
{
}

static u_int
a9tmr_get_timecount(struct timecounter *tc)
{
	struct a9tmr_softc * const sc = tc->tc_priv;

	return (u_int) (a9tmr_gettime(sc));
}