Home | History | Annotate | Line # | Download | only in powerpc
      1 /*	$NetBSD: clock.c,v 1.19 2024/10/19 12:34:09 jmcneill Exp $	*/
      2 /*      $OpenBSD: clock.c,v 1.3 1997/10/13 13:42:53 pefo Exp $	*/
      3 
      4 /*
      5  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
      6  * Copyright (C) 1995, 1996 TooLs GmbH.
      7  * All rights reserved.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed by TooLs GmbH.
     20  * 4. The name of TooLs GmbH may not be used to endorse or promote products
     21  *    derived from this software without specific prior written permission.
     22  *
     23  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
     24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     26  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     27  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     28  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
     29  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
     30  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
     31  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
     32  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     33  */
     34 
     35 #include <sys/cdefs.h>
     36 __KERNEL_RCSID(0, "$NetBSD: clock.c,v 1.19 2024/10/19 12:34:09 jmcneill Exp $");
     37 
     38 #ifdef _KERNEL_OPT
     39 #include "opt_ppcarch.h"
     40 #endif
     41 
     42 #include <sys/param.h>
     43 #include <sys/kernel.h>
     44 #include <sys/systm.h>
     45 #include <sys/device.h>
     46 #include <sys/timetc.h>
     47 
     48 #include <uvm/uvm_extern.h>
     49 
     50 #include <powerpc/psl.h>
     51 #include <powerpc/spr.h>
     52 #if defined (PPC_OEA) || defined(PPC_OEA64) || defined (PPC_OEA64_BRIDGE)
     53 #include <powerpc/oea/spr.h>
     54 #elif defined (PPC_BOOKE)
     55 #include <powerpc/booke/spr.h>
     56 #elif defined (PPC_IBM4XX)
     57 #include <powerpc/ibm4xx/spr.h>
     58 #else
     59 #error unknown powerpc variant
     60 #endif
     61 
/* Decrementer interrupt handler; entered from the MD trap/interrupt path. */
void decr_intr(struct clockframe *);
/* Select and register the timecounter; called from cpu_initclocks(). */
void init_powerpc_tc(void);
static u_int get_powerpc_timecount(struct timecounter *);
#ifdef PPC_OEA601
static u_int get_601_timecount(struct timecounter *);
#endif

/*
 * Timebase calibration values.  Presumably filled in by the
 * machine-dependent startup code before cpu_initclocks() runs --
 * they are read but never computed in this file (TODO confirm).
 */
uint32_t ticks_per_sec;		/* timebase (or 601 RTC) frequency, Hz */
uint32_t ticks_per_msec;	/* timebase ticks per millisecond */
uint32_t ns_per_tick;		/* nanoseconds per timebase tick */
uint32_t ticks_per_intr = 0;	/* decrementer reload; 0 = clock not started */
     73 
#ifdef PPC_OEA601
/*
 * Timecounter for the MPC601, which has a real-time clock (RTCU/RTCL)
 * instead of the timebase found on later CPUs.  Frequency is filled in
 * by init_powerpc_tc() once ticks_per_sec is known.
 */
static struct timecounter powerpc_601_timecounter = {
	.tc_get_timecount = get_601_timecount,
	.tc_poll_pps = 0,
	.tc_counter_mask = 0x7fffffff,	/* 31 usable bits */
	.tc_frequency = 0,		/* set in init_powerpc_tc() */
	.tc_name = "rtc",
	.tc_quality = 100,
	.tc_priv = NULL,
	.tc_next = NULL
};
#endif
     86 
/*
 * Timecounter backed by the PowerPC timebase register (read via mftb).
 * Frequency is filled in by init_powerpc_tc() once ticks_per_sec is known.
 */
static struct timecounter powerpc_timecounter = {
	.tc_get_timecount = get_powerpc_timecount,
	.tc_poll_pps = 0,
	.tc_counter_mask = 0x7fffffff,	/* 31 usable bits */
	.tc_frequency = 0,		/* set in init_powerpc_tc() */
	.tc_name = "mftb",
	.tc_quality = 100,
	.tc_priv = NULL,
	.tc_next = NULL
};
     97 
     98 /*
     99  * Start the real-time and statistics clocks. Leave stathz 0 since there
    100  * are no other timers available.
    101  */
void
cpu_initclocks(void)
{
	struct cpu_info * const ci = curcpu();
	uint32_t msr;

	/* Decrementer reload value giving hz interrupts per second. */
	ticks_per_intr = ticks_per_sec / hz;
	cpu_timebase = ticks_per_sec;
#ifdef PPC_OEA601
	/* The MPC601 has a real-time clock instead of a timebase. */
	if ((mfpvr() >> 16) == MPC601)
		ci->ci_lasttb = rtc_nanosecs();
	else
#endif
		__asm volatile ("mftb %0" : "=r"(ci->ci_lasttb));
	/* Arm the decrementer for the first tick. */
	__asm volatile ("mtdec %0" :: "r"(ticks_per_intr));
	init_powerpc_tc();

	/*
	 * Now allow all hardware interrupts including hardclock(9).
	 */
	__asm volatile ("mfmsr %0; ori %0,%0,%1; mtmsr %0"
	    : "=r"(msr) : "K"(PSL_EE|PSL_RI));
}
    125 
    126 /*
    127  * We assume newhz is either stathz or profhz, and that neither will
    128  * change after being set up above.  Could recalculate intervals here
    129  * but that would be a drag.
    130  */
void
setstatclockrate(int arg)
{

	/*
	 * The decrementer is the only timer on this hardware, so the
	 * statistics clock rate cannot be changed; stathz stays 0 and
	 * hardclock() drives everything.
	 */
}
    137 
/*
 * decr_intr:
 *
 *	Decrementer interrupt handler.  Reloads the decrementer for the
 *	next tick and runs hardclock() once per elapsed tick interval.
 *	If we interrupted code running at or above IPL_CLOCK, the ticks
 *	are banked in ci_tickspending and delivered on a later interrupt.
 */
void
decr_intr(struct clockframe *cfp)
{
	struct cpu_info * const ci = curcpu();
	const register_t msr = mfmsr();
	int pri;
	u_long tb;
	long ticks;
	int nticks;

	/* Check whether we are initialized */
	if (!ticks_per_intr)
		return;

	/*
	 * Based on the actual time delay since the last decrementer reload,
	 * we arrange for earlier interrupt next time.
	 *
	 * The decrementer has gone negative; count how many whole tick
	 * intervals were missed (nticks) while bringing the residue back
	 * into range for the reload.
	 */
	__asm ("mfdec %0" : "=r"(ticks));
	for (nticks = 0; ticks < 0; nticks++)
		ticks += ticks_per_intr;
	__asm volatile ("mtdec %0" :: "r"(ticks));

	ci->ci_data.cpu_nintr++;
	ci->ci_ev_clock.ev_count++;

	pri = splclock();
	if (pri >= IPL_CLOCK) {
		/* Interrupted clock-blocked code: defer the ticks. */
		ci->ci_tickspending += nticks;
	} else {
		/* Deliver both the new ticks and any previously deferred. */
		nticks += ci->ci_tickspending;
		ci->ci_tickspending = 0;

		/*
		 * lasttb is used during microtime. Set it to the virtual
		 * start of this tick interval.
		 */
#ifdef PPC_OEA601
		if ((mfpvr() >> 16) == MPC601)
			tb = rtc_nanosecs();
		else
#endif
			__asm volatile ("mftb %0" : "=r"(tb));

		ci->ci_lasttb = tb + ticks - ticks_per_intr;
		ci->ci_idepth++;
		/* Re-enable external interrupts while running hardclock(). */
		mtmsr(msr | PSL_EE);
		/*
		 * Do standard timer interrupt stuff.
		 * Do softclock stuff only on the last iteration.
		 */
		while (--nticks > 0)
			hardclock(cfp);
		hardclock(cfp);
		mtmsr(msr);
		ci->ci_idepth--;
	}
	/* Allow pending interrupts to run across the splx(). */
	mtmsr(msr | PSL_EE);
	splx(pri);
	mtmsr(msr);
}
    199 
    200 /*
    201  * Wait for about n microseconds (at least!).
    202  */
void
delay(unsigned int n)
{
#ifdef _ARCH_PPC64
	uint64_t tb, scratch;
#else
	uint64_t tb;
	uint32_t tbh, tbl, scratch;

#ifdef PPC_OEA601
	if ((mfpvr() >> 16) == MPC601) {
		/*
		 * MPC601: spin on the real-time clock pair (RTCU seconds,
		 * RTCL nanoseconds) instead of the timebase.
		 */
		u_int32_t rtc[2];

		mfrtc(rtc);
		/* Fold whole seconds into RTCU. */
		while (n >= 1000000) {
			rtc[0]++;
			n -= 1000000;
		}
		/* Remaining microseconds -> nanoseconds, with carry. */
		rtc[1] += (n * 1000);
		if (rtc[1] >= 1000000000) {
			rtc[0]++;
			rtc[1] -= 1000000000;
		}
		/* Spin until (RTCU,RTCL) reaches the target. */
		__asm volatile ("1: mfspr %0,%3; cmplw %0,%1; blt 1b; bgt 2f;"
		    "mfspr %0,%4; cmplw %0,%2; blt 1b; 2:"
		    : "=&r"(scratch)
		    : "r"(rtc[0]), "r"(rtc[1]), "n"(SPR_RTCU_R), "n"(SPR_RTCL_R)
		    : "cr0");
	} else
#endif /* PPC_OEA601 */
#endif /* !_ARCH_PPC64 */
	{
		/*
		 * Compute the target timebase value.  Whole milliseconds
		 * use ticks_per_msec when calibrated; the sub-millisecond
		 * remainder is converted via ns_per_tick, rounding up.
		 * NOTE(review): assumes ns_per_tick != 0 by the time
		 * delay() is called -- set elsewhere, TODO confirm.
		 */
		tb = mftb();
		if (ticks_per_msec != 0 && n >= 1000) {
			tb += (n / 1000ULL) * ticks_per_msec;
			n = n % 1000;
		}
		tb += (n * 1000ULL + ns_per_tick - 1) / ns_per_tick;
#ifdef _ARCH_PPC64
		/* 64-bit: spin on the full timebase in one compare. */
		__asm volatile ("1: mftb %0; cmpld %0,%1; blt 1b;"
			      : "=&r"(scratch) : "r"(tb)
			      : "cr0");
#else
		/* 32-bit: spin on the upper then lower timebase halves. */
		tbh = tb >> 32;
		tbl = tb;
		__asm volatile ("1: mftbu %0; cmplw %0,%1; blt 1b; bgt 2f;"
			      "mftb %0; cmplw %0,%2; blt 1b; 2:"
			      : "=&r"(scratch) : "r"(tbh), "r"(tbl)
			      : "cr0");
#endif
	}
}
    255 
/*
 * get_powerpc_timecount:
 *
 *	Timecounter read method: return the low 31 bits of the timebase.
 *	External interrupts (PSL_EE) are masked around the read and the
 *	original MSR restored afterwards.
 */
static u_int
get_powerpc_timecount(struct timecounter *tc)
{
	u_long tb;
	int msr, scratch;

	/* Clear PSL_EE in the MSR (andi. masks to the low 16 bits). */
	__asm volatile ("mfmsr %0; andi. %1,%0,%2; mtmsr %1"
		      : "=r"(msr), "=r"(scratch) : "K"((u_short)~PSL_EE));

	tb = (u_int)(mftb() & 0x7fffffff);	/* match tc_counter_mask */
	mtmsr(msr);

	return tb;
}
    270 
#ifdef PPC_OEA601
/*
 * get_601_timecount:
 *
 *	Timecounter read method for the MPC601, which uses its real-time
 *	clock (via rtc_nanosecs()) instead of the timebase.  External
 *	interrupts are masked around the read, as in get_powerpc_timecount().
 */
static u_int
get_601_timecount(struct timecounter *tc)
{
	u_long tb;
	int msr, scratch;

	/* Clear PSL_EE in the MSR (andi. masks to the low 16 bits). */
	__asm volatile ("mfmsr %0; andi. %1,%0,%2; mtmsr %1"
		      : "=r"(msr), "=r"(scratch) : "K"((u_short)~PSL_EE));

	tb = rtc_nanosecs();
	mtmsr(msr);

	return tb;
}
#endif
    287 
    288 void
    289 init_powerpc_tc(void)
    290 {
    291 	struct timecounter *tc;
    292 
    293 #ifdef PPC_OEA601
    294 	if ((mfpvr() >> 16) == MPC601) {
    295 		tc = &powerpc_601_timecounter;
    296 	} else
    297 #endif
    298 		tc = &powerpc_timecounter;
    299 
    300 	tc->tc_frequency = ticks_per_sec;
    301 	tc_init(tc);
    302 }
    303