/* $NetBSD: armreg.h,v 1.8 2018/03/20 10:14:29 ryo Exp $ */

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
32 1.1 matt #ifndef _AARCH64_ARMREG_H_
33 1.1 matt #define _AARCH64_ARMREG_H_
34 1.1 matt
35 1.1 matt #ifdef __aarch64__
36 1.1 matt
37 1.8 ryo #include <arm/cputypes.h>
38 1.1 matt #include <sys/types.h>
39 1.1 matt
/*
 * AARCH64REG_READ_INLINE2(regname, regdesc):
 *	emit "static inline uint64_t reg_<regname>_read(void)", which
 *	returns the current value of the system register named <regdesc>
 *	(read with an MRS instruction).
 */
#define AARCH64REG_READ_INLINE2(regname, regdesc)		\
static inline uint64_t						\
reg_##regname##_read(void)					\
{								\
	uint64_t __rv;						\
	__asm __volatile("mrs %0, " #regdesc : "=r"(__rv));	\
	return __rv;						\
}
48 1.1 matt
/*
 * AARCH64REG_WRITE_INLINE2(regname, regdesc):
 *	emit "static inline void reg_<regname>_write(uint64_t)", which
 *	stores its argument into system register <regdesc> (MSR with a
 *	general-purpose source register).
 */
#define AARCH64REG_WRITE_INLINE2(regname, regdesc)		\
static inline void						\
reg_##regname##_write(uint64_t __val)				\
{								\
	__asm __volatile("msr " #regdesc ", %0" :: "r"(__val));	\
}
55 1.1 matt
/*
 * AARCH64REG_WRITEIMM_INLINE2(regname, regdesc):
 *	like AARCH64REG_WRITE_INLINE2, but the "n" constraint forces the
 *	argument to be a compile-time constant, for MSR forms that only
 *	take an immediate operand (e.g. daifset/daifclr).
 */
#define AARCH64REG_WRITEIMM_INLINE2(regname, regdesc)		\
static inline void						\
reg_##regname##_write(uint64_t __val)				\
{								\
	__asm __volatile("msr " #regdesc ", %0" :: "n"(__val));	\
}
62 1.1 matt
/*
 * Shorthand forms for the common case where the assembler-level register
 * name is identical to the C-level name.
 */
#define AARCH64REG_READ_INLINE(regname)				\
	AARCH64REG_READ_INLINE2(regname, regname)

#define AARCH64REG_WRITE_INLINE(regname)			\
	AARCH64REG_WRITE_INLINE2(regname, regname)

#define AARCH64REG_WRITEIMM_INLINE(regname)			\
	AARCH64REG_WRITEIMM_INLINE2(regname, regname)
71 1.1 matt /*
72 1.1 matt * System registers available at EL0 (user)
73 1.1 matt */
74 1.1 matt AARCH64REG_READ_INLINE(ctr_el0) // Cache Type Register
75 1.1 matt
76 1.1 matt static const uintmax_t
77 1.1 matt CTR_EL0_CWG_LINE = __BITS(27,24), // Cacheback Writeback Granule
78 1.1 matt CTR_EL0_ERG_LINE = __BITS(23,20), // Exclusives Reservation Granule
79 1.1 matt CTR_EL0_DMIN_LINE = __BITS(19,16), // Dcache MIN LINE size (log2 - 2)
80 1.1 matt CTR_EL0_L1IP_MASK = __BITS(15,14),
81 1.1 matt CTR_EL0_L1IP_AIVIVT = 1, // ASID-tagged Virtual Index, Virtual Tag
82 1.1 matt CTR_EL0_L1IP_VIPT = 2, // Virtual Index, Physical Tag
83 1.1 matt CTR_EL0_L1IP_PIPT = 3, // Physical Index, Physical Tag
84 1.1 matt CTR_EL0_IMIN_LINE = __BITS(3,0); // Icache MIN LINE size (log2 - 2)
85 1.1 matt
86 1.1 matt AARCH64REG_READ_INLINE(dczid_el0) // Data Cache Zero ID Register
87 1.1 matt
88 1.1 matt static const uintmax_t
89 1.1 matt DCZID_DZP = __BIT(4), // Data Zero Prohibited
90 1.1 matt DCZID_BS = __BITS(3,0); // Block Size (log2 - 2)
91 1.1 matt
92 1.1 matt AARCH64REG_READ_INLINE(tpidrro_el0) // Thread Pointer ID Register (RO)
93 1.1 matt
94 1.1 matt AARCH64REG_READ_INLINE(fpcr) // Floating Point Control Register
95 1.1 matt AARCH64REG_WRITE_INLINE(fpcr)
96 1.1 matt
97 1.1 matt static const uintmax_t
98 1.1 matt FPCR_AHP = __BIT(26), // Alternative Half Precision
99 1.1 matt FPCR_DN = __BIT(25), // Default Nan Control
100 1.1 matt FPCR_FZ = __BIT(24), // Flush-To-Zero
101 1.1 matt FPCR_RMODE = __BITS(23,22),// Rounding Mode
102 1.1 matt FPCR_RN = 0, // Round Nearest
103 1.1 matt FPCR_RP = 1, // Round towards Plus infinity
104 1.1 matt FPCR_RM = 2, // Round towards Minus infinity
105 1.1 matt FPCR_RZ = 3, // Round towards Zero
106 1.1 matt FPCR_STRIDE = __BITS(21,20),
107 1.1 matt FPCR_LEN = __BITS(18,16),
108 1.1 matt FPCR_IDE = __BIT(15), // Input Denormal Exception enable
109 1.1 matt FPCR_IXE = __BIT(12), // IneXact Exception enable
110 1.1 matt FPCR_UFE = __BIT(11), // UnderFlow Exception enable
111 1.1 matt FPCR_OFE = __BIT(10), // OverFlow Exception enable
112 1.1 matt FPCR_DZE = __BIT(9), // Divide by Zero Exception enable
113 1.1 matt FPCR_IOE = __BIT(8), // Invalid Operation Exception enable
114 1.1 matt FPCR_ESUM = 0x1F00;
115 1.1 matt
116 1.1 matt AARCH64REG_READ_INLINE(fpsr) // Floating Point Status Register
117 1.1 matt AARCH64REG_WRITE_INLINE(fpsr)
118 1.1 matt
119 1.1 matt static const uintmax_t
120 1.1 matt FPSR_N32 = __BIT(31), // AARCH32 Negative
121 1.1 matt FPSR_Z32 = __BIT(30), // AARCH32 Zero
122 1.1 matt FPSR_C32 = __BIT(29), // AARCH32 Carry
123 1.1 matt FPSR_V32 = __BIT(28), // AARCH32 Overflow
124 1.1 matt FPSR_QC = __BIT(27), // SIMD Saturation
125 1.1 matt FPSR_IDC = __BIT(7), // Input Denormal Cumulative status
126 1.1 matt FPSR_IXC = __BIT(4), // IneXact Cumulative status
127 1.1 matt FPSR_UFC = __BIT(3), // UnderFlow Cumulative status
128 1.1 matt FPSR_OFC = __BIT(2), // OverFlow Cumulative status
129 1.1 matt FPSR_DZC = __BIT(1), // Divide by Zero Cumulative status
130 1.1 matt FPSR_IOC = __BIT(0), // Invalid Operation Cumulative status
131 1.1 matt FPSR_CSUM = 0x1F;
132 1.1 matt
133 1.1 matt AARCH64REG_READ_INLINE(nzcv) // condition codes
134 1.1 matt AARCH64REG_WRITE_INLINE(nzcv)
135 1.1 matt
136 1.1 matt static const uintmax_t
137 1.3 skrll NZCV_N = __BIT(31), // Negative
138 1.1 matt NZCV_Z = __BIT(30), // Zero
139 1.1 matt NZCV_C = __BIT(29), // Carry
140 1.1 matt NZCV_V = __BIT(28); // Overflow
141 1.1 matt
142 1.1 matt AARCH64REG_READ_INLINE(tpidr_el0) // Thread Pointer ID Register (RW)
143 1.1 matt AARCH64REG_WRITE_INLINE(tpidr_el0)
144 1.1 matt
145 1.3 skrll /*
146 1.1 matt * From here on, these can only be accessed at EL1 (kernel)
147 1.1 matt */
148 1.1 matt
149 1.1 matt /*
150 1.1 matt * These are readonly registers
151 1.1 matt */
152 1.1 matt AARCH64REG_READ_INLINE2(cbar_el1, s3_1_c15_c3_0) // Cortex-A57
153 1.1 matt
154 1.1 matt static const uintmax_t CBAR_PA = __BITS(47,18);
155 1.1 matt
156 1.1 matt AARCH64REG_READ_INLINE(clidr_el1)
157 1.1 matt AARCH64REG_READ_INLINE(ccsidr_el1)
158 1.1 matt AARCH64REG_READ_INLINE(id_afr0_el1)
159 1.1 matt AARCH64REG_READ_INLINE(id_adr0_el1)
160 1.1 matt AARCH64REG_READ_INLINE(id_isar0_el1)
161 1.1 matt AARCH64REG_READ_INLINE(id_isar1_el1)
162 1.1 matt AARCH64REG_READ_INLINE(id_isar2_el1)
163 1.1 matt AARCH64REG_READ_INLINE(id_isar3_el1)
164 1.1 matt AARCH64REG_READ_INLINE(id_isar4_el1)
165 1.1 matt AARCH64REG_READ_INLINE(id_isar5_el1)
166 1.1 matt AARCH64REG_READ_INLINE(id_mmfr0_el1)
167 1.1 matt AARCH64REG_READ_INLINE(id_mmfr1_el1)
168 1.1 matt AARCH64REG_READ_INLINE(id_mmfr2_el1)
169 1.1 matt AARCH64REG_READ_INLINE(id_mmfr3_el1)
170 1.1 matt AARCH64REG_READ_INLINE(id_prf0_el1)
171 1.1 matt AARCH64REG_READ_INLINE(id_prf1_el1)
172 1.1 matt AARCH64REG_READ_INLINE(isr_el1)
173 1.1 matt AARCH64REG_READ_INLINE(midr_el1)
174 1.1 matt AARCH64REG_READ_INLINE(mpidr_el1)
175 1.1 matt AARCH64REG_READ_INLINE(mvfr0_el1)
176 1.1 matt AARCH64REG_READ_INLINE(mvfr1_el1)
177 1.1 matt AARCH64REG_READ_INLINE(mvfr2_el1)
178 1.1 matt AARCH64REG_READ_INLINE(revidr_el1)
179 1.1 matt
180 1.1 matt /*
181 1.1 matt * These are read/write registers
182 1.1 matt */
183 1.1 matt AARCH64REG_READ_INLINE(ccselr_el1) // Cache Size Selection Register
184 1.1 matt AARCH64REG_WRITE_INLINE(ccselr_el1)
185 1.1 matt
186 1.1 matt AARCH64REG_READ_INLINE(cpacr_el1) // Coprocessor Access Control Regiser
187 1.1 matt AARCH64REG_WRITE_INLINE(cpacr_el1)
188 1.1 matt
189 1.1 matt static const uintmax_t
190 1.1 matt CPACR_TTA = __BIT(28), // System Register Access Traps
191 1.1 matt CPACR_FPEN = __BITS(21,20),
192 1.1 matt CPACR_FPEN_NONE = __SHIFTIN(0, CPACR_FPEN),
193 1.1 matt CPACR_FPEN_EL1 = __SHIFTIN(1, CPACR_FPEN),
194 1.1 matt CPACR_FPEN_NONE_2 = __SHIFTIN(2, CPACR_FPEN),
195 1.1 matt CPACR_FPEN_ALL = __SHIFTIN(3, CPACR_FPEN);
196 1.1 matt
197 1.1 matt AARCH64REG_READ_INLINE(elr_el1) // Exception Link Register
198 1.1 matt AARCH64REG_WRITE_INLINE(elr_el1)
199 1.1 matt
200 1.1 matt AARCH64REG_READ_INLINE(esr_el1) // Exception Symdrone Register
201 1.1 matt AARCH64REG_WRITE_INLINE(esr_el1)
202 1.1 matt
203 1.1 matt static const uintmax_t
204 1.1 matt ESR_EC = __BITS(31,26), // Exception Cause
205 1.6 skrll ESR_EC_UNKNOWN = 0x00, // AXX: Unknown Reason
206 1.6 skrll ESR_EC_WFX = 0x01, // AXX: WFI or WFE instruction execution
207 1.6 skrll ESR_EC_CP15_RT = 0x03, // A32: MCR/MRC access to CP15 !EC=0
208 1.6 skrll ESR_EC_CP15_RRT = 0x04, // A32: MCRR/MRRC access to CP15 !EC=0
209 1.6 skrll ESR_EC_CP14_RT = 0x05, // A32: MCR/MRC access to CP14
210 1.6 skrll ESR_EC_CP14_DT = 0x06, // A32: LDC/STC access to CP14
211 1.6 skrll ESR_EC_FP_ACCESS = 0x07, // AXX: Access to SIMD/FP Registers
212 1.6 skrll ESR_EC_FPID = 0x08, // A32: MCR/MRC access to CP10 !EC=7
213 1.6 skrll ESR_EC_CP14_RRT = 0x0c, // A32: MRRC access to CP14
214 1.6 skrll ESR_EC_ILL_STATE = 0x0e, // AXX: Illegal Execution State
215 1.6 skrll ESR_EC_SVC_A32 = 0x11, // A32: SVC Instruction Execution
216 1.6 skrll ESR_EC_HVC_A32 = 0x12, // A32: HVC Instruction Execution
217 1.6 skrll ESR_EC_SMC_A32 = 0x13, // A32: SMC Instruction Execution
218 1.6 skrll ESR_EC_SVC_A64 = 0x15, // A64: SVC Instruction Execution
219 1.6 skrll ESR_EC_HVC_A64 = 0x16, // A64: HVC Instruction Execution
220 1.6 skrll ESR_EC_SMC_A64 = 0x17, // A64: SMC Instruction Execution
221 1.6 skrll ESR_EC_SYS_REG = 0x18, // A64: MSR/MRS/SYS instruction (!EC0/1/7)
222 1.6 skrll ESR_EC_INSN_ABT_EL0 = 0x20, // AXX: Instruction Abort (EL0)
223 1.6 skrll ESR_EC_INSN_ABT_EL1 = 0x21, // AXX: Instruction Abort (EL1)
224 1.6 skrll ESR_EC_PC_ALIGNMENT = 0x22, // AXX: Misaligned PC
225 1.6 skrll ESR_EC_DATA_ABT_EL0 = 0x24, // AXX: Data Abort (EL0)
226 1.6 skrll ESR_EC_DATA_ABT_EL1 = 0x25, // AXX: Data Abort (EL1)
227 1.6 skrll ESR_EC_SP_ALIGNMENT = 0x26, // AXX: Misaligned SP
228 1.6 skrll ESR_EC_FP_TRAP_A32 = 0x28, // A32: FP Exception
229 1.6 skrll ESR_EC_FP_TRAP_A64 = 0x2c, // A64: FP Exception
230 1.6 skrll ESR_EC_SERROR = 0x2f, // AXX: SError Interrupt
231 1.6 skrll ESR_EC_BRKPNT_EL0 = 0x30, // AXX: Breakpoint Exception (EL0)
232 1.6 skrll ESR_EC_BRKPNT_EL1 = 0x31, // AXX: Breakpoint Exception (EL1)
233 1.6 skrll ESR_EC_SW_STEP_EL0 = 0x32, // AXX: Software Step (EL0)
234 1.6 skrll ESR_EC_SW_STEP_EL1 = 0x33, // AXX: Software Step (EL1)
235 1.6 skrll ESR_EC_WTCHPNT_EL0 = 0x34, // AXX: Watchpoint (EL0)
236 1.6 skrll ESR_EC_WTCHPNT_EL1 = 0x35, // AXX: Watchpoint (EL1)
237 1.6 skrll ESR_EC_BKPT_INSN_A32 = 0x38, // A32: BKPT Instruction Execution
238 1.6 skrll ESR_EC_VECTOR_CATCH = 0x3a, // A32: Vector Catch Exception
239 1.6 skrll ESR_EC_BKPT_INSN_A64 = 0x3c, // A64: BKPT Instruction Execution
240 1.1 matt ESR_IL = __BIT(25), // Instruction Length (1=32-bit)
241 1.1 matt ESR_ISS = __BITS(24,0); // Instruction Specific Syndrome
242 1.1 matt
243 1.1 matt
244 1.1 matt AARCH64REG_READ_INLINE(far_el1) // Fault Address Register
245 1.1 matt AARCH64REG_WRITE_INLINE(far_el1)
246 1.1 matt
247 1.1 matt AARCH64REG_READ_INLINE(mair_el1) // Main Id Register
248 1.1 matt AARCH64REG_WRITE_INLINE(mair_el1)
249 1.1 matt
250 1.1 matt AARCH64REG_READ_INLINE(par_el1) // Physical Address Register
251 1.1 matt AARCH64REG_WRITE_INLINE(par_el1)
252 1.1 matt
253 1.1 matt static const uintmax_t
254 1.1 matt PAR_ATTR = __BITS(63,56),// F=0 memory attributes
255 1.1 matt PAR_PA = __BITS(47,12),// F=0 physical address
256 1.1 matt PAR_NS = __BIT(9), // F=0 non-secure
257 1.1 matt PAR_S = __BIT(9), // F=1 failure stage
258 1.1 matt PAR_SHA = __BITS(8,7), // F=0 shareability attribute
259 1.1 matt PAR_SHA_NONE = 0,
260 1.1 matt PAR_SHA_OUTER = 2,
261 1.1 matt PAR_SHA_INNER = 3,
262 1.1 matt PAR_PTW = __BIT(8), // F=1 partial table walk
263 1.1 matt PAR_FST = __BITS(6,1), // F=1 fault status code
264 1.1 matt PAR_F = __BIT(0); // translation failed
265 1.1 matt
266 1.1 matt AARCH64REG_READ_INLINE(rmr_el1) // Reset Management Register
267 1.1 matt AARCH64REG_WRITE_INLINE(rmr_el1)
268 1.1 matt
269 1.1 matt AARCH64REG_READ_INLINE(rvbar_el1) // Reset Vector Base Address Register
270 1.1 matt AARCH64REG_WRITE_INLINE(rvbar_el1)
271 1.1 matt
272 1.2 skrll AARCH64REG_READ_INLINE(sctlr_el1) // System Control Register
273 1.2 skrll AARCH64REG_WRITE_INLINE(sctlr_el1)
274 1.1 matt
275 1.1 matt AARCH64REG_READ_INLINE(sp_el0) // Stack Pointer
276 1.1 matt AARCH64REG_WRITE_INLINE(sp_el0)
277 1.1 matt
278 1.1 matt AARCH64REG_READ_INLINE(daif) // Debug Async Irq Fiq mask register
279 1.1 matt AARCH64REG_WRITE_INLINE(daif)
280 1.1 matt AARCH64REG_WRITEIMM_INLINE(daifclr)
281 1.1 matt AARCH64REG_WRITEIMM_INLINE(daifset)
282 1.1 matt
283 1.1 matt static const uintmax_t
284 1.1 matt DAIF_D = __BIT(3), // Debug Exception Mask
285 1.1 matt DAIF_A = __BIT(2), // SError Abort Mask
286 1.1 matt DAIF_I = __BIT(1), // IRQ Mask
287 1.1 matt DAIF_F = __BIT(0); // FIQ Mask
288 1.1 matt
289 1.1 matt AARCH64REG_READ_INLINE(spsr_el1) // Saved Program Status Register
290 1.1 matt AARCH64REG_WRITE_INLINE(spsr_el1)
291 1.1 matt
292 1.1 matt static const uintmax_t
293 1.1 matt SPSR_NZCV = __BITS(31,28), // mask of N Z C V
294 1.3 skrll SPSR_N = __BIT(31), // Negative
295 1.1 matt SPSR_Z = __BIT(30), // Zero
296 1.1 matt SPSR_C = __BIT(29), // Carry
297 1.1 matt SPSR_V = __BIT(28), // oVerflow
298 1.1 matt SPSR_A32_Q = __BIT(27), // A32: Overflow
299 1.1 matt SPSR_A32_J = __BIT(24), // A32: Jazelle Mode
300 1.1 matt SPSR_A32_IT1 = __BIT(23), // A32: IT[1]
301 1.1 matt SPSR_A32_IT0 = __BIT(22), // A32: IT[0]
302 1.1 matt SPSR_SS = __BIT(21), // Software Step
303 1.1 matt SPSR_IL = __BIT(20), // Instruction Length
304 1.1 matt SPSR_GE = __BITS(19,16), // A32: SIMD GE
305 1.1 matt SPSR_IT7 = __BIT(15), // A32: IT[7]
306 1.1 matt SPSR_IT6 = __BIT(14), // A32: IT[6]
307 1.1 matt SPSR_IT5 = __BIT(13), // A32: IT[5]
308 1.1 matt SPSR_IT4 = __BIT(12), // A32: IT[4]
309 1.1 matt SPSR_IT3 = __BIT(11), // A32: IT[3]
310 1.1 matt SPSR_IT2 = __BIT(10), // A32: IT[2]
311 1.1 matt SPSR_A64_D = __BIT(9), // A64: Debug Exception Mask
312 1.1 matt SPSR_A32_E = __BIT(9), // A32: BE Endian Mode
313 1.1 matt SPSR_A = __BIT(8), // Async abort (SError) Mask
314 1.1 matt SPSR_I = __BIT(7), // IRQ Mask
315 1.1 matt SPSR_F = __BIT(6), // FIQ Mask
316 1.1 matt SPSR_A32_T = __BIT(5), // A32 Thumb Mode
317 1.1 matt SPSR_M = __BITS(4,0), // Execution State
318 1.1 matt SPSR_M_EL3H = 0x0d,
319 1.1 matt SPSR_M_EL3T = 0x0c,
320 1.1 matt SPSR_M_EL2H = 0x09,
321 1.1 matt SPSR_M_EL2T = 0x08,
322 1.1 matt SPSR_M_EL1H = 0x05,
323 1.1 matt SPSR_M_EL1T = 0x04,
324 1.1 matt SPSR_M_EL0T = 0x00,
325 1.1 matt SPSR_M_SYS32 = 0x1f,
326 1.1 matt SPSR_M_UND32 = 0x1b,
327 1.1 matt SPSR_M_ABT32 = 0x17,
328 1.1 matt SPSR_M_SVC32 = 0x13,
329 1.1 matt SPSR_M_IRQ32 = 0x12,
330 1.1 matt SPSR_M_FIQ32 = 0x11,
331 1.1 matt SPSR_M_USR32 = 0x10;
332 1.1 matt
333 1.1 matt AARCH64REG_READ_INLINE(tcr_el1) // Translation Control Register
334 1.1 matt AARCH64REG_WRITE_INLINE(tcr_el1)
335 1.1 matt
336 1.1 matt static const uintmax_t
337 1.1 matt TCR_TBI1 = __BIT(38), // ignore Top Byte for TTBR1_EL1
338 1.1 matt TCR_TBI0 = __BIT(37), // ignore Top Byte for TTBR0_EL1
339 1.1 matt TCR_AS64K = __BIT(36), // Use 64K ASIDs
340 1.1 matt TCR_IPS = __BITS(34,32), // Intermediate Phys Addr Size
341 1.1 matt TCR_IPS_256TB = 5, // 48 bits (256 TB)
342 1.1 matt TCR_IPS_64TB = 4, // 44 bits (16 TB)
343 1.1 matt TCR_IPS_4TB = 3, // 42 bits ( 4 TB)
344 1.1 matt TCR_IPS_1TB = 2, // 40 bits ( 1 TB)
345 1.1 matt TCR_IPS_64GB = 1, // 36 bits (64 GB)
346 1.1 matt TCR_IPS_4GB = 0, // 32 bits (4 GB)
347 1.1 matt TCR_TG1 = __BITS(31,30), // Page Granule Size
348 1.1 matt TCR_TG_4KB = 1, // 4KB page size
349 1.1 matt TCR_TG_16KB = 2, // 16KB page size
350 1.1 matt TCR_TG_64KB = 3, // 64KB page size
351 1.1 matt TCR_SH1 = __BITS(29,28),
352 1.1 matt TCR_SH_NONE = 0,
353 1.1 matt TCR_SH_OUTER = 1,
354 1.1 matt TCR_SH_INNER = 2,
355 1.1 matt TCR_ORGN1 = __BITS(27,26),
356 1.1 matt TCR_XRGN_NC = 0, // Non Cacheable
357 1.1 matt TCR_XRGN_WB_WA = 1, // WriteBack WriteAllocate
358 1.1 matt TCR_XRGN_WT = 2, // WriteThrough
359 1.1 matt TCR_XRGN_WB = 3, // WriteBack
360 1.1 matt TCR_IRGN1 = __BITS(25,24),
361 1.1 matt TCR_EPD1 = __BIT(23), // Walk Disable for TTBR1_EL1
362 1.1 matt TCR_A1 = __BIT(22), // ASID is in TTBR1_EL1
363 1.1 matt TCR_T1SZ = __BITS(21,16), // Size offset for TTBR1_EL1
364 1.1 matt TCR_TG0 = __BITS(15,14),
365 1.1 matt TCR_SH0 = __BITS(13,12),
366 1.1 matt TCR_ORGN0 = __BITS(11,10),
367 1.1 matt TCR_IRGN0 = __BITS(9,8),
368 1.1 matt TCR_EPD0 = __BIT(7), // Walk Disable for TTBR0
369 1.1 matt TCR_T0SZ = __BITS(5,0); // Size offset for TTBR0_EL1
370 1.1 matt
371 1.1 matt #define TCR_PAGE_SIZE1(tcr) (1L << (__SHIFTOUT(tcr, TCR_TG1) * 2 + 10))
372 1.1 matt
373 1.1 matt AARCH64REG_READ_INLINE(tpidr_el1) // Thread ID Register (EL1)
374 1.1 matt AARCH64REG_WRITE_INLINE(tpidr_el1)
375 1.1 matt
376 1.1 matt AARCH64REG_WRITE_INLINE(tpidrro_el0) // Thread ID Register (RO for EL0)
377 1.1 matt
378 1.1 matt AARCH64REG_READ_INLINE(ttbr0_el0) // Translation Table Base Register 0 EL0
379 1.1 matt AARCH64REG_WRITE_INLINE(ttbr0_el0)
380 1.1 matt
381 1.1 matt AARCH64REG_READ_INLINE(ttbr0_el1) // Translation Table Base Register 0 EL0
382 1.1 matt AARCH64REG_WRITE_INLINE(ttbr0_el1)
383 1.1 matt
384 1.1 matt AARCH64REG_READ_INLINE(ttbr1_el1) // Translation Table Base Register 1 EL1
385 1.1 matt AARCH64REG_WRITE_INLINE(ttbr1_el1)
386 1.1 matt
387 1.1 matt static const uint64_t
388 1.1 matt TTBR_ASID = __BITS(63, 48),
389 1.1 matt TTBR_BADDR = __BITS(47, 0);
390 1.1 matt
391 1.1 matt AARCH64REG_READ_INLINE(vbar_el1) // Vector Base Address Register
392 1.1 matt AARCH64REG_WRITE_INLINE(vbar_el1)
393 1.1 matt
394 1.1 matt AARCH64REG_READ_INLINE(pmccfiltr_el0)
395 1.1 matt AARCH64REG_WRITE_INLINE(pmccfiltr_el0)
396 1.1 matt
397 1.1 matt static const uintmax_t
398 1.1 matt PMCCFILTR_P = __BIT(31), // Don't count cycles in EL1
399 1.1 matt PMCCFILTR_U = __BIT(30), // Don't count cycles in EL0
400 1.1 matt PMCCFILTR_NSK = __BIT(29), // Don't count cycles in NS EL1
401 1.1 matt PMCCFILTR_NSU = __BIT(28), // Don't count cycles in NS EL0
402 1.1 matt PMCCFILTR_NSH = __BIT(27), // Don't count cycles in NS EL2
403 1.1 matt PMCCFILTR_M = __BIT(26); // Don't count cycles in EL3
404 1.1 matt
405 1.1 matt AARCH64REG_READ_INLINE(pmccntr_el0)
406 1.1 matt
407 1.1 matt AARCH64REG_READ_INLINE(cntfrq_el0)
408 1.1 matt
409 1.1 matt AARCH64REG_READ_INLINE(cntkctl_el1)
410 1.1 matt AARCH64REG_WRITE_INLINE(cntkctl_el1)
411 1.1 matt
412 1.1 matt static const uintmax_t
413 1.1 matt CNTKCTL_EL0PTEN = __BIT(9), // EL0 access for CNTP CVAL/TVAL/CTL
414 1.1 matt CNTKCTL_EL0VTEN = __BIT(8), // EL0 access for CNTV CVAL/TVAL/CTL
415 1.1 matt CNTKCTL_ELNTI = __BITS(7,4),
416 1.1 matt CNTKCTL_EVNTDIR = __BIT(3),
417 1.1 matt CNTKCTL_EVNTEN = __BIT(2),
418 1.1 matt CNTKCTL_EL0VCTEN = __BIT(1), // EL0 access for CNTVCT and CNTFRQ
419 1.1 matt CNTKCTL_EL0PCTEN = __BIT(0); // EL0 access for CNTPCT and CNTFRQ
420 1.1 matt
421 1.1 matt AARCH64REG_READ_INLINE(cntp_ctl_el0)
422 1.1 matt AARCH64REG_WRITE_INLINE(cntp_ctl_el0)
423 1.1 matt AARCH64REG_READ_INLINE(cntp_cval_el0)
424 1.1 matt AARCH64REG_WRITE_INLINE(cntp_cval_el0)
425 1.1 matt AARCH64REG_READ_INLINE(cntp_tval_el0)
426 1.1 matt AARCH64REG_WRITE_INLINE(cntp_tval_el0)
427 1.1 matt AARCH64REG_READ_INLINE(cntpct_el0)
428 1.1 matt AARCH64REG_WRITE_INLINE(cntpct_el0)
429 1.1 matt
430 1.1 matt AARCH64REG_READ_INLINE(cntps_ctl_el1)
431 1.1 matt AARCH64REG_WRITE_INLINE(cntps_ctl_el1)
432 1.1 matt AARCH64REG_READ_INLINE(cntps_cval_el1)
433 1.1 matt AARCH64REG_WRITE_INLINE(cntps_cval_el1)
434 1.1 matt AARCH64REG_READ_INLINE(cntps_tval_el1)
435 1.1 matt AARCH64REG_WRITE_INLINE(cntps_tval_el1)
436 1.1 matt
437 1.1 matt AARCH64REG_READ_INLINE(cntv_ctl_el0)
438 1.1 matt AARCH64REG_WRITE_INLINE(cntv_ctl_el0)
439 1.1 matt AARCH64REG_READ_INLINE(cntv_cval_el0)
440 1.1 matt AARCH64REG_WRITE_INLINE(cntv_cval_el0)
441 1.1 matt AARCH64REG_READ_INLINE(cntv_tval_el0)
442 1.1 matt AARCH64REG_WRITE_INLINE(cntv_tval_el0)
443 1.1 matt AARCH64REG_READ_INLINE(cntvct_el0)
444 1.1 matt AARCH64REG_WRITE_INLINE(cntvct_el0)
445 1.1 matt
446 1.1 matt static const uintmax_t
447 1.1 matt CNTCTL_ISTATUS = __BIT(2), // Interrupt Asserted
448 1.1 matt CNTCTL_IMASK = __BIT(1), // Timer Interrupt is Masked
449 1.1 matt CNTCTL_ENABLE = __BIT(0); // Timer Enabled
450 1.1 matt
451 1.1 matt #elif defined(__arm__)
452 1.1 matt
453 1.1 matt #include <arm/armreg.h>
454 1.1 matt
455 1.1 matt #endif /* __aarch64__/__arm__ */
456 1.1 matt
457 1.1 matt #endif /* _AARCH64_ARMREG_H_ */
458