/* $NetBSD: sysreg.h,v 1.1 2014/09/19 17:36:26 matt Exp $ */
/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RISCV_SYSREG_H_
#define _RISCV_SYSREG_H_

#ifndef _KERNEL
#include <sys/param.h>
#endif

#define FCSR_FMASK	0	// no exception bits
#define FCSR_FRM	__BITS(7,5)
#define FCSR_FRM_RNE	0b000	// Round Nearest, ties to Even
#define FCSR_FRM_RTZ	0b001	// Round Towards Zero
#define FCSR_FRM_RDN	0b010	// Round DowN (-infinity)
#define FCSR_FRM_RUP	0b011	// Round UP (+infinity)
#define FCSR_FRM_RMM	0b100	// Round to nearest, ties to Max Magnitude
#define FCSR_FFLAGS	__BITS(4,0)	// Sticky bits
#define FCSR_NV		__BIT(4)	// iNValid operation
#define FCSR_DZ		__BIT(3)	// Divide by Zero
#define FCSR_OF		__BIT(2)	// OverFlow
#define FCSR_UF		__BIT(1)	// UnderFlow
#define FCSR_NX		__BIT(0)	// iNeXact

// Read the full floating-point control/status register (fcsr).
static inline uint32_t
riscvreg_fcsr_read(void)
{
	uint32_t __fcsr;
	__asm("frcsr %0" : "=r"(__fcsr));
	return __fcsr;
}

// Write the full fcsr, returning its previous contents.  The asm is
// volatile so the write is not elided when the result is unused.
static inline uint32_t
riscvreg_fcsr_write(uint32_t __new)
{
	uint32_t __old;
	__asm volatile("fscsr %0, %1" : "=r"(__old) : "r"(__new));
	return __old;
}

// Read the sticky exception flags (fflags).
static inline uint32_t
riscvreg_fcsr_read_fflags(void)
{
	uint32_t __old;
	__asm("frflags %0" : "=r"(__old));
	return __SHIFTOUT(__old, FCSR_FFLAGS);
}

// Write the sticky exception flags, returning the previous flags.
static inline uint32_t
riscvreg_fcsr_write_fflags(uint32_t __new)
{
	uint32_t __old;
	__new = __SHIFTIN(__new, FCSR_FFLAGS);
	__asm volatile("fsflags %0, %1" : "=r"(__old) : "r"(__new));
	return __SHIFTOUT(__old, FCSR_FFLAGS);
}
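
/*
 * A minimal usage sketch (illustrative only, not part of this header's
 * API): clear the sticky flags before a computation, then test whether
 * it raised the inexact exception.
 *
 *	riscvreg_fcsr_write_fflags(0);
 *	// ... floating-point computation ...
 *	if (riscvreg_fcsr_read_fflags() & FCSR_NX) {
 *		// ... the result was rounded ...
 *	}
 */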

// Read the current rounding mode (frm), unshifted.
static inline uint32_t
riscvreg_fcsr_read_frm(void)
{
	uint32_t __old;
	__asm("frrm\t%0" : "=r"(__old));
	return __SHIFTOUT(__old, FCSR_FRM);
}

// Set the rounding mode, returning the previous mode.
static inline uint32_t
riscvreg_fcsr_write_frm(uint32_t __new)
{
	uint32_t __old;
	__new = __SHIFTIN(__new, FCSR_FRM);
	__asm volatile("fsrm\t%0, %1" : "=r"(__old) : "r"(__new));
	return __SHIFTOUT(__old, FCSR_FRM);
}
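
/*
 * A minimal usage sketch (illustrative only): temporarily force
 * round-towards-zero, then restore the caller's rounding mode.  Both
 * accessors take and return the unshifted FCSR_FRM_* values, so the
 * pair round-trips cleanly.
 *
 *	uint32_t __frm = riscvreg_fcsr_write_frm(FCSR_FRM_RTZ);
 *	// ... computation that needs truncating rounding ...
 *	riscvreg_fcsr_write_frm(__frm);
 */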

// Status Register
#define SR_IP		__BITS(31,24)	// Pending interrupts
#define SR_IM		__BITS(23,16)	// Interrupt Mask
#define SR_VM		__BIT(7)	// MMU On
#define SR_S64		__BIT(6)	// RV64 supervisor mode
#define SR_U64		__BIT(5)	// RV64 user mode
#define SR_EF		__BIT(4)	// Enable Floating Point
#define SR_PEI		__BIT(3)	// Previous EI setting
#define SR_EI		__BIT(2)	// Enable interrupts
#define SR_PS		__BIT(1)	// Previous (S) supervisor setting
#define SR_S		__BIT(0)	// Supervisor

#ifdef _LP64
#define	SR_USER		(SR_EI|SR_U64|SR_S64|SR_VM|SR_IM)
#define	SR_USER32	(SR_USER & ~SR_U64)
#define	SR_KERNEL	(SR_S|SR_EI|SR_U64|SR_S64|SR_VM)
#else
#define	SR_USER		(SR_EI|SR_VM|SR_IM)
#define	SR_KERNEL	(SR_S|SR_EI|SR_VM)
#endif

// Read the status register.
static inline uint32_t
riscvreg_status_read(void)
{
	uint32_t __sr;
	__asm("csrr\t%0, status" : "=r"(__sr));
	return __sr;
}

// Atomically clear the given status bits, returning the previous value.
// Constant masks that fit the 5-bit CSR immediate use the csrrci form;
// the asm is volatile so the clear survives even if the result is unused.
static inline uint32_t
riscvreg_status_clear(uint32_t __mask)
{
	uint32_t __sr;
	if (__builtin_constant_p(__mask) && __mask < 0x20) {
		__asm volatile("csrrci\t%0, status, %1" : "=r"(__sr) : "i"(__mask));
	} else {
		__asm volatile("csrrc\t%0, status, %1" : "=r"(__sr) : "r"(__mask));
	}
	return __sr;
}

// Atomically set the given status bits, returning the previous value.
static inline uint32_t
riscvreg_status_set(uint32_t __mask)
{
	uint32_t __sr;
	if (__builtin_constant_p(__mask) && __mask < 0x20) {
		__asm volatile("csrrsi\t%0, status, %1" : "=r"(__sr) : "i"(__mask));
	} else {
		__asm volatile("csrrs\t%0, status, %1" : "=r"(__sr) : "r"(__mask));
	}
	return __sr;
}
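
/*
 * A minimal usage sketch (illustrative only, not an API in this header):
 * a disable/restore pair for interrupts built from the accessors above.
 * SR_EI fits the 5-bit immediate, so this compiles to the csrrci form.
 *
 *	uint32_t __sr = riscvreg_status_clear(SR_EI);
 *	// ... critical section with interrupts disabled ...
 *	if (__sr & SR_EI)
 *		riscvreg_status_set(SR_EI);
 */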

// Cause register
#define CAUSE_MISALIGNED_FETCH		0
#define CAUSE_FAULT_FETCH		1
#define CAUSE_ILLEGAL_INSTRUCTION	2
#define CAUSE_PRIVILEGED_INSTRUCTION	3
#define CAUSE_FP_DISABLED		4
#define CAUSE_SYSCALL			6
#define CAUSE_BREAKPOINT		7
#define CAUSE_MISALIGNED_LOAD		8
#define CAUSE_MISALIGNED_STORE		9
#define CAUSE_FAULT_LOAD		10
#define CAUSE_FAULT_STORE		11
#define CAUSE_ACCELERATOR_DISABLED	12

// Read the 64-bit cycle counter.
static inline uint64_t
riscvreg_cycle_read(void)
{
#ifdef _LP64
	uint64_t __lo;
	__asm __volatile("csrr\t%0, cycle" : "=r"(__lo));
	return __lo;
#else
	// On RV32 the counter is split across the cycle/cycleh CSR pair.
	// Read high, low, then high again, and retry if the high half
	// changed in between, so a carry out of the low half can never
	// be observed as a torn 64-bit value.
	uint32_t __hi0, __hi1, __lo0;
	do {
		__asm __volatile(
			"csrr\t%[__hi0], cycleh"
		"\n\t"	"csrr\t%[__lo0], cycle"
		"\n\t"	"csrr\t%[__hi1], cycleh"
		   :	[__hi0] "=r"(__hi0),
			[__lo0] "=r"(__lo0),
			[__hi1] "=r"(__hi1));
	} while (__hi0 != __hi1);
	return ((uint64_t)__hi0 << 32) | (uint64_t)__lo0;
#endif
}
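
/*
 * A minimal usage sketch (illustrative only): timing a code sequence
 * with the cycle counter; wrap-around of the 64-bit count is ignored.
 *
 *	uint64_t __c0 = riscvreg_cycle_read();
 *	// ... code being measured ...
 *	uint64_t __cycles = riscvreg_cycle_read() - __c0;
 */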

#endif /* _RISCV_SYSREG_H_ */