/* $NetBSD: armreg.h,v 1.2 2015/04/27 06:54:12 skrll Exp $ */

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
32 1.1 matt #ifndef _AARCH64_ARMREG_H_
33 1.1 matt #define _AARCH64_ARMREG_H_
34 1.1 matt
35 1.1 matt #ifdef __aarch64__
36 1.1 matt
37 1.1 matt #include <sys/types.h>
38 1.1 matt
/*
 * Generate a static inline that reads system register `regdesc` (the
 * exact assembler spelling) and returns its value as a uint64_t.
 *
 * The asm must be volatile: it has an output operand, so without the
 * qualifier the compiler is free to CSE or hoist the MRS, and two reads
 * of a register whose value changes between them (e.g. a counter or a
 * status register) could observe the same stale value.
 */
#define AARCH64REG_READ_INLINE2(regname, regdesc)		\
static inline uint64_t						\
reg_##regname##_read(void)					\
{								\
	uint64_t __rv;						\
	__asm __volatile("mrs %0, " #regdesc : "=r"(__rv));	\
	return __rv;						\
}
47 1.1 matt
/*
 * Generate a static inline that writes its uint64_t argument to system
 * register `regdesc` with an MSR instruction.  (An asm statement with
 * no output operands is implicitly volatile, so the write is never
 * optimized away.)
 */
#define AARCH64REG_WRITE_INLINE2(regname, regdesc)		\
static inline void						\
reg_##regname##_write(uint64_t __val)				\
{								\
	__asm("msr " #regdesc ", %0" :: "r"(__val));		\
}
54 1.1 matt
/*
 * Like AARCH64REG_WRITE_INLINE2, but the "n" constraint requires
 * __val to be a compile-time integer constant: used for the MSR
 * forms (DAIFSet/DAIFClr) that take a 4-bit immediate.
 */
#define AARCH64REG_WRITEIMM_INLINE2(regname, regdesc)		\
static inline void						\
reg_##regname##_write(uint64_t __val)				\
{								\
	__asm("msr " #regdesc ", %0" :: "n"(__val));		\
}
61 1.1 matt
/*
 * Shorthand for the common case where the accessor name and the
 * assembler register name are identical.
 */
#define AARCH64REG_READ_INLINE(regname)				\
	AARCH64REG_READ_INLINE2(regname, regname)

#define AARCH64REG_WRITE_INLINE(regname)			\
	AARCH64REG_WRITE_INLINE2(regname, regname)

#define AARCH64REG_WRITEIMM_INLINE(regname)			\
	AARCH64REG_WRITEIMM_INLINE2(regname, regname)
70 1.1 matt /*
71 1.1 matt * System registers available at EL0 (user)
72 1.1 matt */
73 1.1 matt AARCH64REG_READ_INLINE(ctr_el0) // Cache Type Register
74 1.1 matt
75 1.1 matt static const uintmax_t
76 1.1 matt CTR_EL0_CWG_LINE = __BITS(27,24), // Cacheback Writeback Granule
77 1.1 matt CTR_EL0_ERG_LINE = __BITS(23,20), // Exclusives Reservation Granule
78 1.1 matt CTR_EL0_DMIN_LINE = __BITS(19,16), // Dcache MIN LINE size (log2 - 2)
79 1.1 matt CTR_EL0_L1IP_MASK = __BITS(15,14),
80 1.1 matt CTR_EL0_L1IP_AIVIVT = 1, // ASID-tagged Virtual Index, Virtual Tag
81 1.1 matt CTR_EL0_L1IP_VIPT = 2, // Virtual Index, Physical Tag
82 1.1 matt CTR_EL0_L1IP_PIPT = 3, // Physical Index, Physical Tag
83 1.1 matt CTR_EL0_IMIN_LINE = __BITS(3,0); // Icache MIN LINE size (log2 - 2)
84 1.1 matt
85 1.1 matt AARCH64REG_READ_INLINE(dczid_el0) // Data Cache Zero ID Register
86 1.1 matt
87 1.1 matt static const uintmax_t
88 1.1 matt DCZID_DZP = __BIT(4), // Data Zero Prohibited
89 1.1 matt DCZID_BS = __BITS(3,0); // Block Size (log2 - 2)
90 1.1 matt
91 1.1 matt AARCH64REG_READ_INLINE(tpidrro_el0) // Thread Pointer ID Register (RO)
92 1.1 matt
93 1.1 matt AARCH64REG_READ_INLINE(fpcr) // Floating Point Control Register
94 1.1 matt AARCH64REG_WRITE_INLINE(fpcr)
95 1.1 matt
96 1.1 matt static const uintmax_t
97 1.1 matt FPCR_AHP = __BIT(26), // Alternative Half Precision
98 1.1 matt FPCR_DN = __BIT(25), // Default Nan Control
99 1.1 matt FPCR_FZ = __BIT(24), // Flush-To-Zero
100 1.1 matt FPCR_RMODE = __BITS(23,22),// Rounding Mode
101 1.1 matt FPCR_RN = 0, // Round Nearest
102 1.1 matt FPCR_RP = 1, // Round towards Plus infinity
103 1.1 matt FPCR_RM = 2, // Round towards Minus infinity
104 1.1 matt FPCR_RZ = 3, // Round towards Zero
105 1.1 matt FPCR_STRIDE = __BITS(21,20),
106 1.1 matt FPCR_LEN = __BITS(18,16),
107 1.1 matt FPCR_IDE = __BIT(15), // Input Denormal Exception enable
108 1.1 matt FPCR_IXE = __BIT(12), // IneXact Exception enable
109 1.1 matt FPCR_UFE = __BIT(11), // UnderFlow Exception enable
110 1.1 matt FPCR_OFE = __BIT(10), // OverFlow Exception enable
111 1.1 matt FPCR_DZE = __BIT(9), // Divide by Zero Exception enable
112 1.1 matt FPCR_IOE = __BIT(8), // Invalid Operation Exception enable
113 1.1 matt FPCR_ESUM = 0x1F00;
114 1.1 matt
115 1.1 matt AARCH64REG_READ_INLINE(fpsr) // Floating Point Status Register
116 1.1 matt AARCH64REG_WRITE_INLINE(fpsr)
117 1.1 matt
118 1.1 matt static const uintmax_t
119 1.1 matt FPSR_N32 = __BIT(31), // AARCH32 Negative
120 1.1 matt FPSR_Z32 = __BIT(30), // AARCH32 Zero
121 1.1 matt FPSR_C32 = __BIT(29), // AARCH32 Carry
122 1.1 matt FPSR_V32 = __BIT(28), // AARCH32 Overflow
123 1.1 matt FPSR_QC = __BIT(27), // SIMD Saturation
124 1.1 matt FPSR_IDC = __BIT(7), // Input Denormal Cumulative status
125 1.1 matt FPSR_IXC = __BIT(4), // IneXact Cumulative status
126 1.1 matt FPSR_UFC = __BIT(3), // UnderFlow Cumulative status
127 1.1 matt FPSR_OFC = __BIT(2), // OverFlow Cumulative status
128 1.1 matt FPSR_DZC = __BIT(1), // Divide by Zero Cumulative status
129 1.1 matt FPSR_IOC = __BIT(0), // Invalid Operation Cumulative status
130 1.1 matt FPSR_CSUM = 0x1F;
131 1.1 matt
132 1.1 matt AARCH64REG_READ_INLINE(nzcv) // condition codes
133 1.1 matt AARCH64REG_WRITE_INLINE(nzcv)
134 1.1 matt
135 1.1 matt static const uintmax_t
136 1.1 matt NZCV_N = __BIT(31), // Negative
137 1.1 matt NZCV_Z = __BIT(30), // Zero
138 1.1 matt NZCV_C = __BIT(29), // Carry
139 1.1 matt NZCV_V = __BIT(28); // Overflow
140 1.1 matt
141 1.1 matt AARCH64REG_READ_INLINE(tpidr_el0) // Thread Pointer ID Register (RW)
142 1.1 matt AARCH64REG_WRITE_INLINE(tpidr_el0)
143 1.1 matt
144 1.1 matt /*
145 1.1 matt * From here on, these can only be accessed at EL1 (kernel)
146 1.1 matt */
147 1.1 matt
148 1.1 matt /*
149 1.1 matt * These are readonly registers
150 1.1 matt */
151 1.1 matt AARCH64REG_READ_INLINE2(cbar_el1, s3_1_c15_c3_0) // Cortex-A57
152 1.1 matt
153 1.1 matt static const uintmax_t CBAR_PA = __BITS(47,18);
154 1.1 matt
155 1.1 matt AARCH64REG_READ_INLINE(clidr_el1)
156 1.1 matt AARCH64REG_READ_INLINE(ccsidr_el1)
157 1.1 matt AARCH64REG_READ_INLINE(id_afr0_el1)
158 1.1 matt AARCH64REG_READ_INLINE(id_adr0_el1)
159 1.1 matt AARCH64REG_READ_INLINE(id_isar0_el1)
160 1.1 matt AARCH64REG_READ_INLINE(id_isar1_el1)
161 1.1 matt AARCH64REG_READ_INLINE(id_isar2_el1)
162 1.1 matt AARCH64REG_READ_INLINE(id_isar3_el1)
163 1.1 matt AARCH64REG_READ_INLINE(id_isar4_el1)
164 1.1 matt AARCH64REG_READ_INLINE(id_isar5_el1)
165 1.1 matt AARCH64REG_READ_INLINE(id_mmfr0_el1)
166 1.1 matt AARCH64REG_READ_INLINE(id_mmfr1_el1)
167 1.1 matt AARCH64REG_READ_INLINE(id_mmfr2_el1)
168 1.1 matt AARCH64REG_READ_INLINE(id_mmfr3_el1)
169 1.1 matt AARCH64REG_READ_INLINE(id_prf0_el1)
170 1.1 matt AARCH64REG_READ_INLINE(id_prf1_el1)
171 1.1 matt AARCH64REG_READ_INLINE(isr_el1)
172 1.1 matt AARCH64REG_READ_INLINE(midr_el1)
173 1.1 matt AARCH64REG_READ_INLINE(mpidr_el1)
174 1.1 matt AARCH64REG_READ_INLINE(mvfr0_el1)
175 1.1 matt AARCH64REG_READ_INLINE(mvfr1_el1)
176 1.1 matt AARCH64REG_READ_INLINE(mvfr2_el1)
177 1.1 matt AARCH64REG_READ_INLINE(revidr_el1)
178 1.1 matt
179 1.1 matt /*
180 1.1 matt * These are read/write registers
181 1.1 matt */
182 1.1 matt AARCH64REG_READ_INLINE(ccselr_el1) // Cache Size Selection Register
183 1.1 matt AARCH64REG_WRITE_INLINE(ccselr_el1)
184 1.1 matt
185 1.1 matt AARCH64REG_READ_INLINE(cpacr_el1) // Coprocessor Access Control Regiser
186 1.1 matt AARCH64REG_WRITE_INLINE(cpacr_el1)
187 1.1 matt
188 1.1 matt static const uintmax_t
189 1.1 matt CPACR_TTA = __BIT(28), // System Register Access Traps
190 1.1 matt CPACR_FPEN = __BITS(21,20),
191 1.1 matt CPACR_FPEN_NONE = __SHIFTIN(0, CPACR_FPEN),
192 1.1 matt CPACR_FPEN_EL1 = __SHIFTIN(1, CPACR_FPEN),
193 1.1 matt CPACR_FPEN_NONE_2 = __SHIFTIN(2, CPACR_FPEN),
194 1.1 matt CPACR_FPEN_ALL = __SHIFTIN(3, CPACR_FPEN);
195 1.1 matt
196 1.1 matt AARCH64REG_READ_INLINE(elr_el1) // Exception Link Register
197 1.1 matt AARCH64REG_WRITE_INLINE(elr_el1)
198 1.1 matt
199 1.1 matt AARCH64REG_READ_INLINE(esr_el1) // Exception Symdrone Register
200 1.1 matt AARCH64REG_WRITE_INLINE(esr_el1)
201 1.1 matt
202 1.1 matt static const uintmax_t
203 1.1 matt ESR_EC = __BITS(31,26), // Exception Cause
204 1.1 matt ESR_EC_UNKOWN = 0, // AXX: Unknown Reason
205 1.1 matt ESR_EC_WFX = 1, // AXX: WFI or WFE instruction execution
206 1.1 matt ESR_EC_CP15_RT = 3, // A32: MCR/MRC access to CP15 !EC=0
207 1.1 matt ESR_EC_CP15_RRT = 4, // A32: MCRR/MRRC access to CP15 !EC=0
208 1.1 matt ESR_EC_CP14_RT = 5, // A32: MCR/MRC access to CP14
209 1.1 matt ESR_EC_CP14_DT = 6, // A32: LDC/STC access to CP14
210 1.1 matt ESR_EC_FP_ACCCES = 7, // AXX: Access to SIMD/FP Registers
211 1.1 matt ESR_EC_FPID = 8, // A32: MCR/MRC access to CP10 !EC=7
212 1.1 matt ESR_EC_CP14_RRT = 12, // A32: MRRC access to CP14
213 1.1 matt ESR_EC_ILL_STATE = 14, // AXX: Illegal Execution State
214 1.1 matt ESR_EC_SVC_A32 = 17, // A32: SVC Instruction Execution
215 1.1 matt ESR_EC_HVC_A32 = 18, // A32: HVC Instruction Execution
216 1.1 matt ESR_EC_SMC_A32 = 19, // A32: SMC Instruction Execution
217 1.1 matt ESR_EC_SVC_A64 = 21, // A64: SVC Instruction Execution
218 1.1 matt ESR_EC_HVC_A64 = 22, // A64: HVC Instruction Execution
219 1.1 matt ESR_EC_SMC_A64 = 23, // A64: SMC Instruction Execution
220 1.1 matt ESR_EC_SYS_REG = 24, // A64: MSR/MRS/SYS instruction (!EC0/1/7)
221 1.1 matt ESR_EC_INSN_ABT_EL0 = 32, // AXX: Instruction Abort (EL0)
222 1.1 matt ESR_EC_INSN_ABT_EL1 = 33, // AXX: Instruction Abort (EL1)
223 1.1 matt ESR_EC_PC_ALIGNMENT = 34, // AXX: Misaligned PC
224 1.1 matt ESR_EC_DATA_ABT_EL0 = 36, // AXX: Data Abort (EL0)
225 1.1 matt ESR_EC_DATA_ABT_EL1 = 37, // AXX: Data Abort (EL1)
226 1.1 matt ESR_EC_SP_ALIGNMENT = 38, // AXX: Misaligned SP
227 1.1 matt ESR_EC_FP_TRAP_A32 = 40, // A32: FP Exception
228 1.1 matt ESR_EC_FP_TRAP_A64 = 44, // A64: FP Exception
229 1.1 matt ESR_EC_SERROR = 47, // AXX: SError Interrupt
230 1.1 matt ESR_EC_BRKPNT_EL0 = 48, // AXX: Breakpoint Exception (EL0)
231 1.1 matt ESR_EC_BRKPNT_EL1 = 49, // AXX: Breakpoint Exception (EL1)
232 1.1 matt ESR_EC_SW_STEP_EL0 = 50, // AXX: Software Step (EL0)
233 1.1 matt ESR_EC_SW_STEP_EL1 = 51, // AXX: Software Step (EL1)
234 1.1 matt ESR_EC_WTCHPNT_EL0 = 52, // AXX: Watchpoint (EL0)
235 1.1 matt ESR_EC_WTCHPNT_EL1 = 53, // AXX: Watchpoint (EL1)
236 1.1 matt ESR_EC_BKPT_INSN_A32 = 56, // A32: BKPT Instruction Execution
237 1.1 matt ESR_EC_VECTOR_CATCH = 58, // A32: Vector Catch Exception
238 1.1 matt ESR_EC_BKPT_INSN_A64 = 60, // A64: BKPT Instruction Execution
239 1.1 matt ESR_IL = __BIT(25), // Instruction Length (1=32-bit)
240 1.1 matt ESR_ISS = __BITS(24,0); // Instruction Specific Syndrome
241 1.1 matt
242 1.1 matt
243 1.1 matt AARCH64REG_READ_INLINE(far_el1) // Fault Address Register
244 1.1 matt AARCH64REG_WRITE_INLINE(far_el1)
245 1.1 matt
246 1.1 matt AARCH64REG_READ_INLINE(mair_el1) // Main Id Register
247 1.1 matt AARCH64REG_WRITE_INLINE(mair_el1)
248 1.1 matt
249 1.1 matt AARCH64REG_READ_INLINE(par_el1) // Physical Address Register
250 1.1 matt AARCH64REG_WRITE_INLINE(par_el1)
251 1.1 matt
252 1.1 matt static const uintmax_t
253 1.1 matt PAR_ATTR = __BITS(63,56),// F=0 memory attributes
254 1.1 matt PAR_PA = __BITS(47,12),// F=0 physical address
255 1.1 matt PAR_NS = __BIT(9), // F=0 non-secure
256 1.1 matt PAR_S = __BIT(9), // F=1 failure stage
257 1.1 matt PAR_SHA = __BITS(8,7), // F=0 shareability attribute
258 1.1 matt PAR_SHA_NONE = 0,
259 1.1 matt PAR_SHA_OUTER = 2,
260 1.1 matt PAR_SHA_INNER = 3,
261 1.1 matt PAR_PTW = __BIT(8), // F=1 partial table walk
262 1.1 matt PAR_FST = __BITS(6,1), // F=1 fault status code
263 1.1 matt PAR_F = __BIT(0); // translation failed
264 1.1 matt
265 1.1 matt AARCH64REG_READ_INLINE(rmr_el1) // Reset Management Register
266 1.1 matt AARCH64REG_WRITE_INLINE(rmr_el1)
267 1.1 matt
268 1.1 matt AARCH64REG_READ_INLINE(rvbar_el1) // Reset Vector Base Address Register
269 1.1 matt AARCH64REG_WRITE_INLINE(rvbar_el1)
270 1.1 matt
271 1.2 skrll AARCH64REG_READ_INLINE(sctlr_el1) // System Control Register
272 1.2 skrll AARCH64REG_WRITE_INLINE(sctlr_el1)
273 1.1 matt
274 1.1 matt AARCH64REG_READ_INLINE(sp_el0) // Stack Pointer
275 1.1 matt AARCH64REG_WRITE_INLINE(sp_el0)
276 1.1 matt
277 1.1 matt AARCH64REG_READ_INLINE(daif) // Debug Async Irq Fiq mask register
278 1.1 matt AARCH64REG_WRITE_INLINE(daif)
279 1.1 matt AARCH64REG_WRITEIMM_INLINE(daifclr)
280 1.1 matt AARCH64REG_WRITEIMM_INLINE(daifset)
281 1.1 matt
282 1.1 matt static const uintmax_t
283 1.1 matt DAIF_D = __BIT(3), // Debug Exception Mask
284 1.1 matt DAIF_A = __BIT(2), // SError Abort Mask
285 1.1 matt DAIF_I = __BIT(1), // IRQ Mask
286 1.1 matt DAIF_F = __BIT(0); // FIQ Mask
287 1.1 matt
288 1.1 matt AARCH64REG_READ_INLINE(spsr_el1) // Saved Program Status Register
289 1.1 matt AARCH64REG_WRITE_INLINE(spsr_el1)
290 1.1 matt
291 1.1 matt static const uintmax_t
292 1.1 matt SPSR_NZCV = __BITS(31,28), // mask of N Z C V
293 1.1 matt SPSR_N = __BIT(31), // Negative
294 1.1 matt SPSR_Z = __BIT(30), // Zero
295 1.1 matt SPSR_C = __BIT(29), // Carry
296 1.1 matt SPSR_V = __BIT(28), // oVerflow
297 1.1 matt SPSR_A32_Q = __BIT(27), // A32: Overflow
298 1.1 matt SPSR_A32_J = __BIT(24), // A32: Jazelle Mode
299 1.1 matt SPSR_A32_IT1 = __BIT(23), // A32: IT[1]
300 1.1 matt SPSR_A32_IT0 = __BIT(22), // A32: IT[0]
301 1.1 matt SPSR_SS = __BIT(21), // Software Step
302 1.1 matt SPSR_IL = __BIT(20), // Instruction Length
303 1.1 matt SPSR_GE = __BITS(19,16), // A32: SIMD GE
304 1.1 matt SPSR_IT7 = __BIT(15), // A32: IT[7]
305 1.1 matt SPSR_IT6 = __BIT(14), // A32: IT[6]
306 1.1 matt SPSR_IT5 = __BIT(13), // A32: IT[5]
307 1.1 matt SPSR_IT4 = __BIT(12), // A32: IT[4]
308 1.1 matt SPSR_IT3 = __BIT(11), // A32: IT[3]
309 1.1 matt SPSR_IT2 = __BIT(10), // A32: IT[2]
310 1.1 matt SPSR_A64_D = __BIT(9), // A64: Debug Exception Mask
311 1.1 matt SPSR_A32_E = __BIT(9), // A32: BE Endian Mode
312 1.1 matt SPSR_A = __BIT(8), // Async abort (SError) Mask
313 1.1 matt SPSR_I = __BIT(7), // IRQ Mask
314 1.1 matt SPSR_F = __BIT(6), // FIQ Mask
315 1.1 matt SPSR_A32_T = __BIT(5), // A32 Thumb Mode
316 1.1 matt SPSR_M = __BITS(4,0), // Execution State
317 1.1 matt SPSR_M_EL3H = 0x0d,
318 1.1 matt SPSR_M_EL3T = 0x0c,
319 1.1 matt SPSR_M_EL2H = 0x09,
320 1.1 matt SPSR_M_EL2T = 0x08,
321 1.1 matt SPSR_M_EL1H = 0x05,
322 1.1 matt SPSR_M_EL1T = 0x04,
323 1.1 matt SPSR_M_EL0T = 0x00,
324 1.1 matt SPSR_M_SYS32 = 0x1f,
325 1.1 matt SPSR_M_UND32 = 0x1b,
326 1.1 matt SPSR_M_ABT32 = 0x17,
327 1.1 matt SPSR_M_SVC32 = 0x13,
328 1.1 matt SPSR_M_IRQ32 = 0x12,
329 1.1 matt SPSR_M_FIQ32 = 0x11,
330 1.1 matt SPSR_M_USR32 = 0x10;
331 1.1 matt
332 1.1 matt AARCH64REG_READ_INLINE(tcr_el1) // Translation Control Register
333 1.1 matt AARCH64REG_WRITE_INLINE(tcr_el1)
334 1.1 matt
335 1.1 matt static const uintmax_t
336 1.1 matt TCR_TBI1 = __BIT(38), // ignore Top Byte for TTBR1_EL1
337 1.1 matt TCR_TBI0 = __BIT(37), // ignore Top Byte for TTBR0_EL1
338 1.1 matt TCR_AS64K = __BIT(36), // Use 64K ASIDs
339 1.1 matt TCR_IPS = __BITS(34,32), // Intermediate Phys Addr Size
340 1.1 matt TCR_IPS_256TB = 5, // 48 bits (256 TB)
341 1.1 matt TCR_IPS_64TB = 4, // 44 bits (16 TB)
342 1.1 matt TCR_IPS_4TB = 3, // 42 bits ( 4 TB)
343 1.1 matt TCR_IPS_1TB = 2, // 40 bits ( 1 TB)
344 1.1 matt TCR_IPS_64GB = 1, // 36 bits (64 GB)
345 1.1 matt TCR_IPS_4GB = 0, // 32 bits (4 GB)
346 1.1 matt TCR_TG1 = __BITS(31,30), // Page Granule Size
347 1.1 matt TCR_TG_4KB = 1, // 4KB page size
348 1.1 matt TCR_TG_16KB = 2, // 16KB page size
349 1.1 matt TCR_TG_64KB = 3, // 64KB page size
350 1.1 matt TCR_SH1 = __BITS(29,28),
351 1.1 matt TCR_SH_NONE = 0,
352 1.1 matt TCR_SH_OUTER = 1,
353 1.1 matt TCR_SH_INNER = 2,
354 1.1 matt TCR_ORGN1 = __BITS(27,26),
355 1.1 matt TCR_XRGN_NC = 0, // Non Cacheable
356 1.1 matt TCR_XRGN_WB_WA = 1, // WriteBack WriteAllocate
357 1.1 matt TCR_XRGN_WT = 2, // WriteThrough
358 1.1 matt TCR_XRGN_WB = 3, // WriteBack
359 1.1 matt TCR_IRGN1 = __BITS(25,24),
360 1.1 matt TCR_EPD1 = __BIT(23), // Walk Disable for TTBR1_EL1
361 1.1 matt TCR_A1 = __BIT(22), // ASID is in TTBR1_EL1
362 1.1 matt TCR_T1SZ = __BITS(21,16), // Size offset for TTBR1_EL1
363 1.1 matt TCR_TG0 = __BITS(15,14),
364 1.1 matt TCR_SH0 = __BITS(13,12),
365 1.1 matt TCR_ORGN0 = __BITS(11,10),
366 1.1 matt TCR_IRGN0 = __BITS(9,8),
367 1.1 matt TCR_EPD0 = __BIT(7), // Walk Disable for TTBR0
368 1.1 matt TCR_T0SZ = __BITS(5,0); // Size offset for TTBR0_EL1
369 1.1 matt
370 1.1 matt #define TCR_PAGE_SIZE1(tcr) (1L << (__SHIFTOUT(tcr, TCR_TG1) * 2 + 10))
371 1.1 matt
372 1.1 matt AARCH64REG_READ_INLINE(tpidr_el1) // Thread ID Register (EL1)
373 1.1 matt AARCH64REG_WRITE_INLINE(tpidr_el1)
374 1.1 matt
375 1.1 matt AARCH64REG_WRITE_INLINE(tpidrro_el0) // Thread ID Register (RO for EL0)
376 1.1 matt
377 1.1 matt AARCH64REG_READ_INLINE(ttbr0_el0) // Translation Table Base Register 0 EL0
378 1.1 matt AARCH64REG_WRITE_INLINE(ttbr0_el0)
379 1.1 matt
380 1.1 matt AARCH64REG_READ_INLINE(ttbr0_el1) // Translation Table Base Register 0 EL0
381 1.1 matt AARCH64REG_WRITE_INLINE(ttbr0_el1)
382 1.1 matt
383 1.1 matt AARCH64REG_READ_INLINE(ttbr1_el1) // Translation Table Base Register 1 EL1
384 1.1 matt AARCH64REG_WRITE_INLINE(ttbr1_el1)
385 1.1 matt
386 1.1 matt static const uint64_t
387 1.1 matt TTBR_ASID = __BITS(63, 48),
388 1.1 matt TTBR_BADDR = __BITS(47, 0);
389 1.1 matt
390 1.1 matt AARCH64REG_READ_INLINE(vbar_el1) // Vector Base Address Register
391 1.1 matt AARCH64REG_WRITE_INLINE(vbar_el1)
392 1.1 matt
393 1.1 matt AARCH64REG_READ_INLINE(pmccfiltr_el0)
394 1.1 matt AARCH64REG_WRITE_INLINE(pmccfiltr_el0)
395 1.1 matt
396 1.1 matt static const uintmax_t
397 1.1 matt PMCCFILTR_P = __BIT(31), // Don't count cycles in EL1
398 1.1 matt PMCCFILTR_U = __BIT(30), // Don't count cycles in EL0
399 1.1 matt PMCCFILTR_NSK = __BIT(29), // Don't count cycles in NS EL1
400 1.1 matt PMCCFILTR_NSU = __BIT(28), // Don't count cycles in NS EL0
401 1.1 matt PMCCFILTR_NSH = __BIT(27), // Don't count cycles in NS EL2
402 1.1 matt PMCCFILTR_M = __BIT(26); // Don't count cycles in EL3
403 1.1 matt
404 1.1 matt AARCH64REG_READ_INLINE(pmccntr_el0)
405 1.1 matt
406 1.1 matt AARCH64REG_READ_INLINE(cntfrq_el0)
407 1.1 matt
408 1.1 matt AARCH64REG_READ_INLINE(cntkctl_el1)
409 1.1 matt AARCH64REG_WRITE_INLINE(cntkctl_el1)
410 1.1 matt
411 1.1 matt static const uintmax_t
412 1.1 matt CNTKCTL_EL0PTEN = __BIT(9), // EL0 access for CNTP CVAL/TVAL/CTL
413 1.1 matt CNTKCTL_EL0VTEN = __BIT(8), // EL0 access for CNTV CVAL/TVAL/CTL
414 1.1 matt CNTKCTL_ELNTI = __BITS(7,4),
415 1.1 matt CNTKCTL_EVNTDIR = __BIT(3),
416 1.1 matt CNTKCTL_EVNTEN = __BIT(2),
417 1.1 matt CNTKCTL_EL0VCTEN = __BIT(1), // EL0 access for CNTVCT and CNTFRQ
418 1.1 matt CNTKCTL_EL0PCTEN = __BIT(0); // EL0 access for CNTPCT and CNTFRQ
419 1.1 matt
420 1.1 matt AARCH64REG_READ_INLINE(cntp_ctl_el0)
421 1.1 matt AARCH64REG_WRITE_INLINE(cntp_ctl_el0)
422 1.1 matt AARCH64REG_READ_INLINE(cntp_cval_el0)
423 1.1 matt AARCH64REG_WRITE_INLINE(cntp_cval_el0)
424 1.1 matt AARCH64REG_READ_INLINE(cntp_tval_el0)
425 1.1 matt AARCH64REG_WRITE_INLINE(cntp_tval_el0)
426 1.1 matt AARCH64REG_READ_INLINE(cntpct_el0)
427 1.1 matt AARCH64REG_WRITE_INLINE(cntpct_el0)
428 1.1 matt
429 1.1 matt AARCH64REG_READ_INLINE(cntps_ctl_el1)
430 1.1 matt AARCH64REG_WRITE_INLINE(cntps_ctl_el1)
431 1.1 matt AARCH64REG_READ_INLINE(cntps_cval_el1)
432 1.1 matt AARCH64REG_WRITE_INLINE(cntps_cval_el1)
433 1.1 matt AARCH64REG_READ_INLINE(cntps_tval_el1)
434 1.1 matt AARCH64REG_WRITE_INLINE(cntps_tval_el1)
435 1.1 matt
436 1.1 matt AARCH64REG_READ_INLINE(cntv_ctl_el0)
437 1.1 matt AARCH64REG_WRITE_INLINE(cntv_ctl_el0)
438 1.1 matt AARCH64REG_READ_INLINE(cntv_cval_el0)
439 1.1 matt AARCH64REG_WRITE_INLINE(cntv_cval_el0)
440 1.1 matt AARCH64REG_READ_INLINE(cntv_tval_el0)
441 1.1 matt AARCH64REG_WRITE_INLINE(cntv_tval_el0)
442 1.1 matt AARCH64REG_READ_INLINE(cntvct_el0)
443 1.1 matt AARCH64REG_WRITE_INLINE(cntvct_el0)
444 1.1 matt
445 1.1 matt static const uintmax_t
446 1.1 matt CNTCTL_ISTATUS = __BIT(2), // Interrupt Asserted
447 1.1 matt CNTCTL_IMASK = __BIT(1), // Timer Interrupt is Masked
448 1.1 matt CNTCTL_ENABLE = __BIT(0); // Timer Enabled
449 1.1 matt
450 1.1 matt #elif defined(__arm__)
451 1.1 matt
452 1.1 matt #include <arm/armreg.h>
453 1.1 matt
454 1.1 matt #endif /* __aarch64__/__arm__ */
455 1.1 matt
456 1.1 matt #endif /* _AARCH64_ARMREG_H_ */
457