Home | History | Annotate | Line # | Download | only in include
sysreg.h revision 1.11
      1  1.11  christos /* $NetBSD: sysreg.h,v 1.11 2020/12/16 19:49:04 christos Exp $ */
      2   1.4      maxv 
      3   1.4      maxv /*
      4   1.1      matt  * Copyright (c) 2014 The NetBSD Foundation, Inc.
      5   1.1      matt  * All rights reserved.
      6   1.1      matt  *
      7   1.1      matt  * This code is derived from software contributed to The NetBSD Foundation
      8   1.1      matt  * by Matt Thomas of 3am Software Foundry.
      9   1.1      matt  *
     10   1.1      matt  * Redistribution and use in source and binary forms, with or without
     11   1.1      matt  * modification, are permitted provided that the following conditions
     12   1.1      matt  * are met:
     13   1.1      matt  * 1. Redistributions of source code must retain the above copyright
     14   1.1      matt  *    notice, this list of conditions and the following disclaimer.
     15   1.1      matt  * 2. Redistributions in binary form must reproduce the above copyright
     16   1.1      matt  *    notice, this list of conditions and the following disclaimer in the
     17   1.1      matt  *    documentation and/or other materials provided with the distribution.
     18   1.1      matt  *
     19   1.1      matt  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20   1.1      matt  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21   1.1      matt  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22   1.1      matt  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23   1.1      matt  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24   1.1      matt  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25   1.1      matt  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26   1.1      matt  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27   1.1      matt  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28   1.1      matt  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29   1.1      matt  * POSSIBILITY OF SUCH DAMAGE.
     30   1.1      matt  */
     31   1.1      matt 
     32   1.1      matt #ifndef _RISCV_SYSREG_H_
     33   1.1      matt #define _RISCV_SYSREG_H_
     34   1.1      matt 
     35   1.1      matt #ifndef _KERNEL
     36   1.1      matt #include <sys/param.h>
     37   1.1      matt #endif
     38   1.1      matt 
/*
 * Floating-point control/status register (fcsr) fields:
 * rounding mode (frm) in bits 7:5, sticky accrued-exception
 * flags (fflags) in bits 4:0.
 */
#define FCSR_FMASK	0	// no exception bits
#define FCSR_FRM	__BITS(7,5)	// rounding mode field
#define FCSR_FRM_RNE	0b000	// Round Nearest, ties to Even
#define FCSR_FRM_RTZ	0b001	// Round Towards Zero
#define FCSR_FRM_RDN	0b010	// Round DowN (-infinity)
#define FCSR_FRM_RUP	0b011	// Round UP (+infinity)
#define FCSR_FRM_RMM	0b100	// Round to nearest, ties to Max Magnitude
#define FCSR_FRM_DYN	0b111	// Dynamic rounding
#define FCSR_FFLAGS	__BITS(4,0)	// Sticky bits
#define FCSR_NV		__BIT(4)	// iNValid operation
#define FCSR_DZ		__BIT(3)	// Divide by Zero
#define FCSR_OF		__BIT(2)	// OverFlow
#define FCSR_UF		__BIT(1)	// UnderFlow
#define FCSR_NX		__BIT(0)	// iNeXact
     53   1.1      matt 
/*
 * Read the floating-point control/status register (fcsr).
 *
 * The asm must be volatile: fflags mutate as a hidden side effect of FP
 * instructions the compiler cannot see, so without volatile this read
 * could be CSE'd or hoisted past the FP operations it is meant to observe.
 */
static inline uint32_t
riscvreg_fcsr_read(void)
{
	uint32_t __fcsr;
	__asm __volatile("frcsr %0" : "=r"(__fcsr));
	return __fcsr;
}
     61   1.1      matt 
     62   1.1      matt 
/*
 * Swap a new value into fcsr; returns the previous fcsr value.
 *
 * The asm must be volatile: writing the CSR is a side effect, and
 * without volatile the compiler may delete the asm entirely when the
 * caller ignores the returned old value.
 */
static inline uint32_t
riscvreg_fcsr_write(uint32_t __new)
{
	uint32_t __old;
	__asm __volatile("fscsr %0, %1" : "=r"(__old) : "r"(__new));
	return __old;
}
     70   1.1      matt 
     71   1.1      matt static inline uint32_t
     72   1.1      matt riscvreg_fcsr_read_fflags(void)
     73   1.1      matt {
     74   1.1      matt 	uint32_t __old;
     75   1.1      matt 	__asm("frflags %0" : "=r"(__old));
     76   1.1      matt 	return __SHIFTOUT(__old, FCSR_FFLAGS);
     77   1.1      matt }
     78   1.1      matt 
     79   1.1      matt static inline uint32_t
     80   1.1      matt riscvreg_fcsr_write_fflags(uint32_t __new)
     81   1.1      matt {
     82   1.1      matt 	uint32_t __old;
     83   1.1      matt 	__new = __SHIFTIN(__new, FCSR_FFLAGS);
     84   1.1      matt 	__asm("fsflags %0, %1" : "=r"(__old) : "r"(__new));
     85   1.1      matt 	return __SHIFTOUT(__old, FCSR_FFLAGS);
     86   1.1      matt }
     87   1.1      matt 
     88   1.1      matt static inline uint32_t
     89   1.1      matt riscvreg_fcsr_read_frm(void)
     90   1.1      matt {
     91   1.1      matt 	uint32_t __old;
     92   1.1      matt 	__asm("frrm\t%0" : "=r"(__old));
     93   1.1      matt 	return __SHIFTOUT(__old, FCSR_FRM);
     94   1.1      matt }
     95   1.1      matt 
     96   1.1      matt static inline uint32_t
     97   1.1      matt riscvreg_fcsr_write_frm(uint32_t __new)
     98   1.1      matt {
     99   1.1      matt 	uint32_t __old;
    100   1.1      matt 	__new = __SHIFTIN(__new, FCSR_FRM);
    101   1.1      matt 	__asm volatile("fsrm\t%0, %1" : "=r"(__old) : "r"(__new));
    102   1.1      matt 	return __SHIFTOUT(__old, FCSR_FRM);
    103   1.1      matt }
    104   1.1      matt 
/* Supervisor Status Register (sstatus) */
#ifdef _LP64
/* All WPRI (reserved writes-preserve, reads-ignore) bits, RV64 layout. */
#define SR_WPRI		__BITS(62, 34) | __BITS(31,20) | __BIT(17) | \
			    __BITS(12,9) | __BITS(7,6) | __BITS(3,2)
#define SR_SD		__BIT(63)
			/* Bits 62-34 are WPRI */
#define SR_UXL		__BITS(33,32)	/* user XLEN (register width) */
#define  SR_UXL_32	1
#define  SR_UXL_64	2
#define  SR_UXL_128	3
			/* Bits 31-20 are WPRI*/
#else
/* All WPRI bits, RV32 layout (no UXL field; SD moves to bit 31). */
#define SR_WPRI		__BITS(30,20) | __BIT(17) | __BITS(12,9) | \
			    __BITS(7,6) | __BITS(3,2)
#define SR_SD		__BIT(31)
			/* Bits 30-20 are WPRI*/
#endif /* _LP64 */

/* Both RV32 and RV64 have the bottom 20 bits shared */
#define SR_MXR		__BIT(19)
#define SR_SUM		__BIT(18)
			/* Bit 17 is WPRI */
#define SR_XS		__BITS(16,15)
#define SR_FS		__BITS(14,13)	/* FP unit state, values below */
#define  SR_FS_OFF	0
#define  SR_FS_INITIAL	1
#define  SR_FS_CLEAN	2
#define  SR_FS_DIRTY	3

			/* Bits 12-9 are WPRI */
#define SR_SPP		__BIT(8)	/* previous privilege mode */
			/* Bits 7-6 are WPRI */
#define SR_SPIE		__BIT(5)	/* previous SIE */
#define SR_UPIE		__BIT(4)	/* previous UIE */
			/* Bits 3-2 are WPRI */
#define SR_SIE		__BIT(1)	/* supervisor interrupt enable */
#define SR_UIE		__BIT(0)	/* user interrupt enable */
    142   1.9     skrll 
/* Supervisor interrupt registers */
/* ... interrupt pending register (sip) */
			/* Bit (XLEN-1)-10 is WIRI */
#define SIP_SEIP	__BIT(9)	/* supervisor external */
#define SIP_UEIP	__BIT(8)	/* user external */
			/* Bit 7-6 is WIRI */
#define SIP_STIP	__BIT(5)	/* supervisor timer */
#define SIP_UTIP	__BIT(4)	/* user timer */
			/* Bit 3-2 is WIRI */
#define SIP_SSIP	__BIT(1)	/* supervisor software */
#define SIP_USIP	__BIT(0)	/* user software */

/* ... interrupt-enable register (sie) */
			/* Bit (XLEN-1) - 10 is WIRI */
#define SIE_SEIE	__BIT(9)	/* supervisor external */
#define SIE_UEIE	__BIT(8)	/* user external */
			/* Bit 7-6 is WIRI */
#define SIE_STIE	__BIT(5)	/* supervisor timer */
#define SIE_UTIE	__BIT(4)	/* user timer */
			/* Bit 3-2 is WIRI */
#define SIE_SSIE	__BIT(1)	/* supervisor software */
#define SIE_USIE	__BIT(0)	/* user software */

/* Mask for all interrupts (was misspelled SIE_SEI, an undefined name) */
#define SIE_IM		(SIE_SEIE|SIE_UEIE|SIE_STIE|SIE_UTIE|SIE_SSIE|SIE_USIE)
    168   1.1      matt 
#ifdef _LP64
#define	SR_USER		(SR_UIE)		/* user-interrupt enable only */
#define	SR_USER32	(SR_USER)		/* 32-bit user on RV64: same bits */
#define	SR_KERNEL	(SR_SIE | SR_UIE)	/* supervisor + user enables */
#else
#define	SR_USER		(SR_UIE)		/* user-interrupt enable only */
#define	SR_KERNEL	(SR_SIE | SR_UIE)	/* supervisor + user enables */
#endif
    177   1.1      matt 
/*
 * Read the supervisor status register (sstatus).
 *
 * Volatile: sstatus is live hardware state (e.g. FS becomes DIRTY as a
 * side effect of FP use), so repeated reads must not be merged.
 *
 * NOTE(review): the uint32_t return truncates the upper half of sstatus
 * on RV64 (losing SR_SD/SR_UXL); widening it would change the API for
 * all callers, so only flagging it here.
 */
static inline uint32_t
riscvreg_status_read(void)
{
	uint32_t __sr;
	__asm __volatile("csrr\t%0, sstatus" : "=r"(__sr));
	return __sr;
}
    185   1.1      matt 
/*
 * Atomically clear the given bits in sstatus; returns the previous
 * value.  Uses the immediate form (csrrci) when the mask is a
 * compile-time constant that fits the 5-bit CSR immediate.
 *
 * Volatile: the read-modify-write is a side effect that must not be
 * deleted when the returned old value is unused.
 */
static inline uint32_t
riscvreg_status_clear(uint32_t __mask)
{
	uint32_t __sr;
	if (__builtin_constant_p(__mask) && __mask < 0x20) {
		__asm __volatile("csrrci\t%0, sstatus, %1" : "=r"(__sr) : "i"(__mask));
	} else {
		__asm __volatile("csrrc\t%0, sstatus, %1" : "=r"(__sr) : "r"(__mask));
	}
	return __sr;
}
    197   1.1      matt 
/*
 * Atomically set the given bits in sstatus; returns the previous
 * value.  Uses the immediate form (csrrsi) when the mask is a
 * compile-time constant that fits the 5-bit CSR immediate.
 *
 * Volatile: the read-modify-write is a side effect that must not be
 * deleted when the returned old value is unused.
 */
static inline uint32_t
riscvreg_status_set(uint32_t __mask)
{
	uint32_t __sr;
	if (__builtin_constant_p(__mask) && __mask < 0x20) {
		__asm __volatile("csrrsi\t%0, sstatus, %1" : "=r"(__sr) : "i"(__mask));
	} else {
		__asm __volatile("csrrs\t%0, sstatus, %1" : "=r"(__sr) : "r"(__mask));
	}
	return __sr;
}
    209   1.1      matt 
// Cause register (scause) exception codes
#define CAUSE_FETCH_MISALIGNED		0
#define CAUSE_FETCH_ACCESS		1
#define CAUSE_ILLEGAL_INSTRUCTION	2
#define CAUSE_BREAKPOINT		3
#define CAUSE_LOAD_MISALIGNED		4
#define CAUSE_LOAD_ACCESS		5
#define CAUSE_STORE_MISALIGNED		6
#define CAUSE_STORE_ACCESS		7
#define CAUSE_SYSCALL			8	// legacy alias of CAUSE_USER_ECALL
#define CAUSE_USER_ECALL		8
#define CAUSE_SUPERVISOR_ECALL		9
/* 10 is reserved */
#define CAUSE_MACHINE_ECALL		11
#define CAUSE_FETCH_PAGE_FAULT		12
#define CAUSE_LOAD_PAGE_FAULT		13
/* 14 is Reserved */
#define CAUSE_STORE_PAGE_FAULT		15
/* >= 16 is reserved */
    229   1.1      matt 
/*
 * Read the 64-bit cycle counter.
 */
static inline uint64_t
riscvreg_cycle_read(void)
{
#ifdef _LP64
	/* RV64: the whole counter fits in one CSR read. */
	uint64_t __lo;
	__asm __volatile("csrr\t%0, cycle" : "=r"(__lo));
	return __lo;
#else
	/*
	 * RV32: the counter is split across cycle/cycleh.  Read
	 * high, low, high again and retry if the high half changed,
	 * so a carry out of the low half between the two reads can
	 * never produce a torn 64-bit value.
	 */
	uint32_t __hi0, __hi1, __lo0;
	do {
		__asm __volatile(
			"csrr\t%[__hi0], cycleh"
		"\n\t"	"csrr\t%[__lo0], cycle"
		"\n\t"	"csrr\t%[__hi1], cycleh"
		   :	[__hi0] "=r"(__hi0),
			[__lo0] "=r"(__lo0),
			[__hi1] "=r"(__hi1));
	} while (__hi0 != __hi1);
	return ((uint64_t)__hi0 << 32) | (uint64_t)__lo0;
#endif
}
    251   1.1      matt 
/* Supervisor address translation and protection register (satp) */
#ifdef _LP64
#define SATP_MODE		__BITS(63,60)	/* translation scheme */
#define  SATP_MODE_SV39		8
#define  SATP_MODE_SV48		9
#define SATP_ASID		__BITS(59,44)	/* address-space ID */
#define SATP_PPN		__BITS(43,0)	/* root page-table PPN */
#else
#define SATP_MODE		__BIT(31)	/* translation scheme */
#define  SATP_MODE_SV32		1
#define SATP_ASID		__BITS(30,22)	/* address-space ID */
#define SATP_PPN		__BITS(21,0)	/* root page-table PPN */
#endif
    264   1.2      matt 
    265   1.2      matt static inline uint32_t
    266   1.2      matt riscvreg_asid_read(void)
    267   1.2      matt {
    268   1.4      maxv 	uintptr_t satp;
    269   1.4      maxv 	__asm __volatile("csrr	%0, satp" : "=r" (satp));
    270   1.4      maxv 	return __SHIFTOUT(satp, SATP_ASID);
    271   1.2      matt }
    272   1.2      matt 
    273   1.2      matt static inline void
    274   1.4      maxv riscvreg_asid_write(uint32_t asid)
    275   1.2      matt {
    276   1.4      maxv 	uintptr_t satp;
    277   1.4      maxv 	__asm __volatile("csrr	%0, satp" : "=r" (satp));
    278   1.4      maxv 	satp &= ~SATP_ASID;
    279   1.4      maxv 	satp |= __SHIFTIN((uintptr_t)asid, SATP_ASID);
    280   1.4      maxv 	__asm __volatile("csrw	satp, %0" :: "r" (satp));
    281   1.2      matt }
    282   1.2      matt 
    283   1.1      matt #endif /* _RISCV_SYSREG_H_ */
    284