1 /* $NetBSD: procfs_machdep.c,v 1.6 2024/02/07 04:20:26 msaitoh Exp $ */ 2 3 /*- 4 * Copyright (c) 2020 Ryo Shimizu 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS 17 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 * POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __KERNEL_RCSID(0, "$NetBSD: procfs_machdep.c,v 1.6 2024/02/07 04:20:26 msaitoh Exp $"); 31 32 #include <sys/param.h> 33 #include <sys/cpu.h> 34 #include <sys/systm.h> 35 36 #include <miscfs/procfs/procfs.h> 37 38 #include <aarch64/armreg.h> 39 #include <aarch64/cpufunc.h> 40 41 /* use variables named 'buf', 'left', 'total' */ 42 #define FORWARD_BUF(_len) \ 43 do { \ 44 total += _len; \ 45 if (_len < left) { \ 46 buf += _len; \ 47 left -= _len; \ 48 } else { \ 49 buf += left; \ 50 left = 0; \ 51 } \ 52 } while (0 /*CONSTCOND*/) 53 54 #define OUTPUT_BUF(fmt, args...) \ 55 do { \ 56 size_t l = snprintf(buf, left, fmt, ## args); \ 57 FORWARD_BUF(l); \ 58 } while (0/*CONSTCOND*/) 59 60 static int 61 procfs_cpuinfo_features(struct cpu_info *ci, char *buf, int buflen) 62 { 63 uint64_t isar0, isar1, mmfr2, pfr0, pfr1; 64 size_t left, total; 65 66 isar0 = ci->ci_id.ac_aa64isar0; 67 isar1 = ci->ci_id.ac_aa64isar1; 68 mmfr2 = ci->ci_id.ac_aa64mmfr2; 69 pfr0 = ci->ci_id.ac_aa64pfr0; 70 pfr1 = ci->ci_id.ac_aa64pfr1; 71 72 left = buflen; 73 total = 0; 74 75 /* 76 * I don't know if we need to mimic the order of HWCAP in linux 77 */ 78 OUTPUT_BUF("Features\t:"); 79 #define SO_EQ(reg, mask, val) (__SHIFTOUT((reg), (mask)) == (val)) 80 if (SO_EQ(pfr0, ID_AA64PFR0_EL1_FP, ID_AA64PFR0_EL1_FP_IMPL)) 81 OUTPUT_BUF(" fp"); 82 if (SO_EQ(pfr0, ID_AA64PFR0_EL1_ADVSIMD, ID_AA64PFR0_EL1_ADV_SIMD_IMPL)) 83 OUTPUT_BUF(" asimd"); 84 /* notyet: " evtstrm" */ 85 if (SO_EQ(isar0, ID_AA64ISAR0_EL1_AES, ID_AA64ISAR0_EL1_AES_AES)) 86 OUTPUT_BUF(" aes"); 87 if (SO_EQ(isar0, ID_AA64ISAR0_EL1_AES, ID_AA64ISAR0_EL1_AES_PMUL)) 88 OUTPUT_BUF(" pmull"); 89 if (SO_EQ(isar0, ID_AA64ISAR0_EL1_SHA1, 90 ID_AA64ISAR0_EL1_SHA1_SHA1CPMHSU)) 91 OUTPUT_BUF(" sha1"); 92 if (SO_EQ(isar0, ID_AA64ISAR0_EL1_SHA2, 93 ID_AA64ISAR0_EL1_SHA2_SHA256HSU)) 94 OUTPUT_BUF(" sha2"); 95 if (SO_EQ(isar0, ID_AA64ISAR0_EL1_CRC32, ID_AA64ISAR0_EL1_CRC32_CRC32X)) 96 OUTPUT_BUF(" crc32"); 97 if (SO_EQ(isar0, ID_AA64ISAR0_EL1_ATOMIC, ID_AA64ISAR0_EL1_ATOMIC_SWP)) 98 OUTPUT_BUF(" atomics"); 99 if (SO_EQ(pfr0, ID_AA64PFR0_EL1_FP, ID_AA64PFR0_EL1_FP_HP)) 100 OUTPUT_BUF(" fphp"); 101 if (SO_EQ(pfr0, ID_AA64PFR0_EL1_ADVSIMD, ID_AA64PFR0_EL1_ADV_SIMD_HP)) 102 OUTPUT_BUF(" asimdhp"); 103 /* notyet: " cpuid" */ 104 if (SO_EQ(isar0, ID_AA64ISAR0_EL1_RDM, ID_AA64ISAR0_EL1_RDM_SQRDML)) 105 OUTPUT_BUF(" asimdrdm"); 106 if (SO_EQ(isar1, ID_AA64ISAR1_EL1_JSCVT, 107 ID_AA64ISAR1_EL1_JSCVT_SUPPORTED)) 108 OUTPUT_BUF(" jscvt"); 109 if (SO_EQ(isar1, ID_AA64ISAR1_EL1_FCMA, 110 ID_AA64ISAR1_EL1_FCMA_SUPPORTED)) 111 OUTPUT_BUF(" fcma"); 112 if (SO_EQ(isar1, ID_AA64ISAR1_EL1_LRCPC, ID_AA64ISAR1_EL1_LRCPC_PR)) 113 OUTPUT_BUF(" lrcpc"); 114 if (SO_EQ(isar1, ID_AA64ISAR1_EL1_DPB, ID_AA64ISAR1_EL1_DPB_CVAP)) 115 OUTPUT_BUF(" dcpop"); 116 if (SO_EQ(isar0, ID_AA64ISAR0_EL1_SHA3, ID_AA64ISAR0_EL1_SHA3_EOR3)) 117 OUTPUT_BUF(" sha3"); 118 if (SO_EQ(isar0, ID_AA64ISAR0_EL1_SM3, ID_AA64ISAR0_EL1_SM3_SM3)) 119 OUTPUT_BUF(" sm3"); 120 if (SO_EQ(isar0, ID_AA64ISAR0_EL1_SM4, ID_AA64ISAR0_EL1_SM4_SM4)) 121 OUTPUT_BUF(" sm4"); 122 if (SO_EQ(isar0, ID_AA64ISAR0_EL1_DP, ID_AA64ISAR0_EL1_DP_UDOT)) 123 OUTPUT_BUF(" asimddp"); 124 if (SO_EQ(isar0, ID_AA64ISAR0_EL1_SHA2, 125 ID_AA64ISAR0_EL1_SHA2_SHA512HSU)) 126 OUTPUT_BUF(" sha512"); 127 if (SO_EQ(pfr0, ID_AA64PFR0_EL1_SVE, ID_AA64PFR0_EL1_SVE_IMPL)) 128 OUTPUT_BUF(" sve"); 129 if (SO_EQ(isar0, ID_AA64ISAR0_EL1_FHM, ID_AA64ISAR0_EL1_FHM_FMLAL)) 130 OUTPUT_BUF(" asimdfhm"); 131 if (SO_EQ(pfr0, ID_AA64PFR0_EL1_DIT, ID_AA64PFR0_EL1_DIT_IMPL)) 132 OUTPUT_BUF(" dit"); 133 if (SO_EQ(mmfr2, ID_AA64MMFR2_EL1_AT, ID_AA64MMFR2_EL1_AT_16BIT)) 134 OUTPUT_BUF(" uscat"); 135 if (SO_EQ(isar1, ID_AA64ISAR1_EL1_LRCPC, ID_AA64ISAR1_EL1_LRCPC_PR_UR)) 136 OUTPUT_BUF(" ilrcpc"); 137 if (SO_EQ(isar0, ID_AA64ISAR0_EL1_TS, ID_AA64ISAR0_EL1_TS_CFINV)) 138 OUTPUT_BUF(" flagm"); 139 if (SO_EQ(pfr1, ID_AA64PFR1_EL1_SSBS, ID_AA64PFR1_EL1_SSBS_MSR_MRS)) 140 OUTPUT_BUF(" ssbs"); 141 if (SO_EQ(isar1, ID_AA64ISAR1_EL1_SB, ID_AA64ISAR1_EL1_SB_SUPPORTED)) 142 OUTPUT_BUF(" sb"); 143 #ifdef ARMV83_PAC 144 if (aarch64_pac_enabled) 145 OUTPUT_BUF(" paca pacg"); 146 #endif 147 if (SO_EQ(isar1, ID_AA64ISAR1_EL1_DPB, ID_AA64ISAR1_EL1_DPB_CVAP_CVADP)) 148 OUTPUT_BUF(" dcpodp"); 149 /* notyet: " sve2" */ 150 /* notyet: " sveaes" */ 151 /* notyet: " svepmull" */ 152 /* notyet: " svebitperm" */ 153 /* notyet: " svesha3" */ 154 /* notyet: " svesm4" */ 155 if (SO_EQ(isar0, ID_AA64ISAR0_EL1_TS, ID_AA64ISAR0_EL1_TS_AXFLAG)) 156 OUTPUT_BUF(" flagm2"); 157 if (SO_EQ(isar1, ID_AA64ISAR1_EL1_FRINTTS, 158 ID_AA64ISAR1_EL1_FRINTTS_SUPPORTED)) 159 OUTPUT_BUF(" frint"); 160 /* notyet: " svei8mm" */ 161 /* notyet: " svef32mm" */ 162 /* notyet: " svef64mm" */ 163 /* notyet: " svebf16" */ 164 if (SO_EQ(isar1, ID_AA64ISAR1_EL1_I8MM, 165 ID_AA64ISAR1_EL1_I8MM_SUPPORTED)) 166 OUTPUT_BUF(" i8mm"); 167 if (SO_EQ(isar1, ID_AA64ISAR1_EL1_BF16, ID_AA64ISAR1_EL1_BF16_BFDOT)) 168 OUTPUT_BUF(" bf16"); 169 if (SO_EQ(isar1, ID_AA64ISAR1_EL1_DGH, ID_AA64ISAR1_EL1_DGH_SUPPORTED)) 170 OUTPUT_BUF(" dgh"); 171 if (SO_EQ(isar0, ID_AA64ISAR0_EL1_RNDR, ID_AA64ISAR0_EL1_RNDR_RNDRRS)) 172 OUTPUT_BUF(" rng"); 173 #ifdef ARMV85_BTI 174 if (aarch64_bti_enabled) 175 OUTPUT_BUF(" bti"); 176 #endif 177 OUTPUT_BUF("\n"); 178 #undef SO_EQ 179 180 return total; 181 } 182 183 int 184 procfs_getcpuinfstr(char *buf, size_t *lenp) 185 { 186 struct cpu_info *ci; 187 CPU_INFO_ITERATOR cii; 188 size_t left, len, total; 189 int ret = 0; 190 191 left = *lenp; 192 total = 0; 193 194 for (CPU_INFO_FOREACH(cii, ci)) { 195 OUTPUT_BUF("processor\t: %d\n", cii); 196 197 len = procfs_cpuinfo_features(ci, buf, left); 198 FORWARD_BUF(len); 199 200 OUTPUT_BUF("CPU implementer\t: 0x%02lx\n", 201 __SHIFTOUT(ci->ci_id.ac_midr, CPU_ID_IMPLEMENTOR_MASK)); 202 OUTPUT_BUF("CPU architecture: 8\n"); /* ARMv8 */ 203 OUTPUT_BUF("CPU variant\t: 0x%lx\n", 204 __SHIFTOUT(ci->ci_id.ac_midr, CPU_ID_VARIANT_MASK)); 205 OUTPUT_BUF("CPU part\t: 0x%03lx\n", 206 __SHIFTOUT(ci->ci_id.ac_midr, CPU_ID_PARTNO_MASK)); 207 OUTPUT_BUF("CPU revision\t: %lu\n", 208 __SHIFTOUT(ci->ci_id.ac_midr, CPU_ID_REVISION_MASK)); 209 OUTPUT_BUF("\n"); 210 } 211 212 /* not enough buffer? */ 213 if (total >= *lenp) 214 ret = -1; 215 216 *lenp = total + 1; /* total output + '\0' */ 217 return ret; 218 } 219