/*	$NetBSD: atomic.S,v 1.38 2025/09/06 02:53:21 riastradh Exp $	*/

/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <machine/asm.h>
/*
 * __HAVE_ constants should not be in <machine/types.h>
 * because we can't use them from assembly.  OTOH we
 * only need __HAVE_ATOMIC64_OPS here, and we don't.
 */
#ifdef _KERNEL
#define	ALIAS(f, t)	STRONG_ALIAS(f,t)
#else
#define	ALIAS(f, t)	WEAK_ALIAS(f,t)
#endif

#ifdef _HARDKERNEL
#include "opt_xen.h"
#include <machine/frameasm.h>
#define	LOCK		HOTPATCH(HP_NAME_NOLOCK, 1); lock
#define	HOTPATCH_CAS_64	HOTPATCH(HP_NAME_CAS_64, 49);
#else
#define	LOCK		lock
#define	HOTPATCH_CAS_64	/* nothing */
#endif

	.text
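
/*
 * The routines below implement the C-level atomic operations
 * declared in <sys/atomic.h> and documented in atomic_ops(3).
 * For orientation, two representative prototypes (paraphrased
 * from the manual page, not authoritative):
 *
 *	void	 atomic_add_32(volatile uint32_t *ptr, int32_t val);
 *	uint32_t atomic_add_32_nv(volatile uint32_t *ptr, int32_t val);
 *
 * Per the i386 cdecl calling convention, arguments arrive on the
 * stack -- 4(%esp) is the first argument, 8(%esp) the second --
 * and 32-bit results are returned in %eax.
 */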

ENTRY(_atomic_add_32)
	movl	4(%esp), %edx
	movl	8(%esp), %eax
	LOCK
	addl	%eax, (%edx)
	ret
END(_atomic_add_32)

ENTRY(_atomic_add_32_nv)
	movl	4(%esp), %edx
	movl	8(%esp), %eax
	movl	%eax, %ecx
	LOCK
	xaddl	%eax, (%edx)
	addl	%ecx, %eax
	ret
END(_atomic_add_32_nv)
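
/*
 * A rough C sketch of the XADD trick in _atomic_add_32_nv above
 * (illustrative only): LOCK XADDL atomically stores old + val and
 * leaves the old value in %eax, so the new value is recovered by
 * adding the saved addend once more:
 *
 *	old = *ptr;
 *	*ptr = old + val;	// both done by the one lock xaddl
 *	return old + val;	// the trailing addl %ecx, %eax
 */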

ENTRY(_atomic_and_32)
	movl	4(%esp), %edx
	movl	8(%esp), %eax
	LOCK
	andl	%eax, (%edx)
	ret
END(_atomic_and_32)

ENTRY(_atomic_and_32_nv)
	movl	4(%esp), %edx
	movl	(%edx), %eax
0:
	movl	%eax, %ecx
	andl	8(%esp), %ecx
	LOCK
	cmpxchgl %ecx, (%edx)
	jnz	1f
	movl	%ecx, %eax
	ret
1:
	jmp	0b
END(_atomic_and_32_nv)
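
/*
 * AND has no fetch-and-op instruction, so _atomic_and_32_nv (and
 * _atomic_or_32_nv below) retries a CMPXCHG until no other CPU has
 * modified the word in between.  A C sketch of the pattern, with
 * hypothetical variable names:
 *
 *	uint32_t old, new;
 *	do {
 *		old = *ptr;		// cmpxchgl refreshes %eax on failure
 *		new = old & val;	// (or: old | val)
 *	} while (atomic_cas_32(ptr, old, new) != old);
 *	return new;
 */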

ENTRY(_atomic_dec_32)
	movl	4(%esp), %edx
	LOCK
	decl	(%edx)
	ret
END(_atomic_dec_32)

ENTRY(_atomic_dec_32_nv)
	movl	4(%esp), %edx
	movl	$-1, %eax
	LOCK
	xaddl	%eax, (%edx)
	decl	%eax
	ret
END(_atomic_dec_32_nv)

ENTRY(_atomic_inc_32)
	movl	4(%esp), %edx
	LOCK
	incl	(%edx)
	ret
END(_atomic_inc_32)

ENTRY(_atomic_inc_32_nv)
	movl	4(%esp), %edx
	movl	$1, %eax
	LOCK
	xaddl	%eax, (%edx)
	incl	%eax
	ret
END(_atomic_inc_32_nv)

ENTRY(_atomic_or_32)
	movl	4(%esp), %edx
	movl	8(%esp), %eax
	LOCK
	orl	%eax, (%edx)
	ret
END(_atomic_or_32)

ENTRY(_atomic_or_32_nv)
	movl	4(%esp), %edx
	movl	(%edx), %eax
0:
	movl	%eax, %ecx
	orl	8(%esp), %ecx
	LOCK
	cmpxchgl %ecx, (%edx)
	jnz	1f
	movl	%ecx, %eax
	ret
1:
	jmp	0b
END(_atomic_or_32_nv)

ENTRY(_atomic_swap_32)
	movl	4(%esp), %edx
	movl	8(%esp), %eax
	xchgl	%eax, (%edx)
	ret
END(_atomic_swap_32)

ENTRY(_atomic_cas_32)
	movl	4(%esp), %edx
	movl	8(%esp), %eax
	movl	12(%esp), %ecx
	LOCK
	cmpxchgl %ecx, (%edx)
	/* %eax now contains the old value */
	ret
END(_atomic_cas_32)

ENTRY(_atomic_cas_32_ni)
	movl	4(%esp), %edx
	movl	8(%esp), %eax
	movl	12(%esp), %ecx
	cmpxchgl %ecx, (%edx)
	/* %eax now contains the old value */
	ret
END(_atomic_cas_32_ni)
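
/*
 * The semantics of _atomic_cas_32, as a C sketch:
 *
 *	uint32_t
 *	atomic_cas_32(volatile uint32_t *ptr, uint32_t expected,
 *	    uint32_t new)
 *	{
 *		uint32_t old = *ptr;
 *
 *		if (old == expected)
 *			*ptr = new;
 *		return old;	// the whole sequence is one atomic step
 *	}
 *
 * CMPXCHGL leaves the old value in %eax, which is already the return
 * register, hence no epilogue.  _atomic_cas_32_ni is the same minus
 * the LOCK prefix: it remains atomic with respect to the issuing CPU
 * (e.g. against interrupts) but not against other processors.
 */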

ENTRY(_membar_acquire)
	/*
	 * Every load from normal memory is a load-acquire on x86, so
	 * there is never any need for explicit barriers to order
	 * load-before-anything.
	 */
	ret
END(_membar_acquire)

ENTRY(_membar_release)
	/*
	 * Every store to normal memory is a store-release on x86, so
	 * there is never any need for explicit barriers to order
	 * anything-before-store.
	 */
	ret
END(_membar_release)

ENTRY(_membar_sync)
	/*
	 * MFENCE, or a serializing instruction like a locked ADDL,
	 * is necessary to order store-before-load.  Every other
	 * ordering -- load-before-anything, anything-before-store --
	 * is already guaranteed without explicit barriers.
	 *
	 * Empirically it turns out locked ADDL is cheaper than MFENCE,
	 * so we use that, with an offset below the return address on
	 * the stack to avoid a false dependency with RET.  (It might
	 * even be better to use a much lower offset, say -128, to
	 * avoid false dependencies for subsequent callees of the
	 * caller.)
	 *
	 * https://pvk.ca/Blog/2014/10/19/performance-optimisation-~-writing-an-essay/
	 * https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
	 * https://www.agner.org/optimize/instruction_tables.pdf
	 *
	 * Sync with paravirt_membar_sync in
	 * sys/arch/i386/i386/cpufunc.S.
	 */
	LOCK
	addl	$0, -4(%esp)
	ret
END(_membar_sync)
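
/*
 * Illustrative (hypothetical) producer/consumer showing where each
 * barrier belongs; `data' and `ready' stand for volatile shared
 * variables:
 *
 *	producer:
 *		data = 42;
 *		membar_release();	// plain RET on x86, see above
 *		ready = 1;
 *
 *	consumer:
 *		while (!ready)
 *			continue;
 *		membar_acquire();	// likewise a plain RET here
 *		use(data);
 *
 * Only membar_sync, which orders a store before a later load (e.g.
 * Dekker-style mutual exclusion), must emit a real instruction.  The
 * legacy names membar_producer/membar_consumer and membar_enter/
 * membar_exit are aliased to these operations at the end of this file.
 */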

#if defined(__HAVE_ATOMIC64_OPS) || defined(_KERNEL)
#ifdef XENPV
STRONG_ALIAS(_atomic_cas_64,_atomic_cas_cx8)
#else
ENTRY(_atomic_cas_64)
	HOTPATCH_CAS_64
	/* 49 bytes of instructions */
	/*
	 * Fallback for CPUs that lack CMPXCHG8B.  In the kernel,
	 * disabling interrupts makes the compare and the two stores
	 * indivisible, which suffices on such (uniprocessor-only)
	 * CPUs; on CX8-capable CPUs these 49 bytes are hotpatched at
	 * boot with the LOCKed CMPXCHG8B sequence taken from
	 * _atomic_cas_cx8 below (see HOTPATCH_CAS_64 above).
	 */
#ifdef _HARDKERNEL
	pushf
	cli
#endif
	pushl	%edi
	pushl	%ebx
	movl	12(%esp), %edi
	movl	16(%esp), %eax
	movl	20(%esp), %edx
	movl	24(%esp), %ebx
	movl	28(%esp), %ecx
	cmpl	0(%edi), %eax
	jne	2f
	cmpl	4(%edi), %edx
	jne	2f
	movl	%ebx, 0(%edi)
	movl	%ecx, 4(%edi)
1:
	popl	%ebx
	popl	%edi
#ifdef _HARDKERNEL
	popf
#endif
	ret
2:
	movl	0(%edi), %eax
	movl	4(%edi), %edx
	jmp	1b
END(_atomic_cas_64)
#endif /* !XENPV */

ENTRY(_atomic_cas_cx8)
	/* 49 bytes of instructions */
	pushl	%edi
	pushl	%ebx
	movl	12(%esp), %edi
	movl	16(%esp), %eax
	movl	20(%esp), %edx
	movl	24(%esp), %ebx
	movl	28(%esp), %ecx
	LOCK
	cmpxchg8b (%edi)
	popl	%ebx
	popl	%edi
	ret
#ifdef _HARDKERNEL
	.space	20, 0xCC
#endif
END(_atomic_cas_cx8)
LABEL(_atomic_cas_cx8_end)
#endif /* __HAVE_ATOMIC64_OPS || _KERNEL */

ALIAS(atomic_add_32,_atomic_add_32)
ALIAS(atomic_add_int,_atomic_add_32)
ALIAS(atomic_add_long,_atomic_add_32)
ALIAS(atomic_add_ptr,_atomic_add_32)

ALIAS(atomic_add_32_nv,_atomic_add_32_nv)
ALIAS(atomic_add_int_nv,_atomic_add_32_nv)
ALIAS(atomic_add_long_nv,_atomic_add_32_nv)
ALIAS(atomic_add_ptr_nv,_atomic_add_32_nv)

ALIAS(atomic_and_32,_atomic_and_32)
ALIAS(atomic_and_uint,_atomic_and_32)
ALIAS(atomic_and_ulong,_atomic_and_32)
ALIAS(atomic_and_ptr,_atomic_and_32)

ALIAS(atomic_and_32_nv,_atomic_and_32_nv)
ALIAS(atomic_and_uint_nv,_atomic_and_32_nv)
ALIAS(atomic_and_ulong_nv,_atomic_and_32_nv)
ALIAS(atomic_and_ptr_nv,_atomic_and_32_nv)

ALIAS(atomic_dec_32,_atomic_dec_32)
ALIAS(atomic_dec_uint,_atomic_dec_32)
ALIAS(atomic_dec_ulong,_atomic_dec_32)
ALIAS(atomic_dec_ptr,_atomic_dec_32)

ALIAS(atomic_dec_32_nv,_atomic_dec_32_nv)
ALIAS(atomic_dec_uint_nv,_atomic_dec_32_nv)
ALIAS(atomic_dec_ulong_nv,_atomic_dec_32_nv)
ALIAS(atomic_dec_ptr_nv,_atomic_dec_32_nv)

ALIAS(atomic_inc_32,_atomic_inc_32)
ALIAS(atomic_inc_uint,_atomic_inc_32)
ALIAS(atomic_inc_ulong,_atomic_inc_32)
ALIAS(atomic_inc_ptr,_atomic_inc_32)

ALIAS(atomic_inc_32_nv,_atomic_inc_32_nv)
ALIAS(atomic_inc_uint_nv,_atomic_inc_32_nv)
ALIAS(atomic_inc_ulong_nv,_atomic_inc_32_nv)
ALIAS(atomic_inc_ptr_nv,_atomic_inc_32_nv)

ALIAS(atomic_or_32,_atomic_or_32)
ALIAS(atomic_or_uint,_atomic_or_32)
ALIAS(atomic_or_ulong,_atomic_or_32)
ALIAS(atomic_or_ptr,_atomic_or_32)

ALIAS(atomic_or_32_nv,_atomic_or_32_nv)
ALIAS(atomic_or_uint_nv,_atomic_or_32_nv)
ALIAS(atomic_or_ulong_nv,_atomic_or_32_nv)
ALIAS(atomic_or_ptr_nv,_atomic_or_32_nv)

ALIAS(atomic_swap_32,_atomic_swap_32)
ALIAS(atomic_swap_uint,_atomic_swap_32)
ALIAS(atomic_swap_ulong,_atomic_swap_32)
ALIAS(atomic_swap_ptr,_atomic_swap_32)

ALIAS(atomic_cas_32,_atomic_cas_32)
ALIAS(atomic_cas_uint,_atomic_cas_32)
ALIAS(atomic_cas_ulong,_atomic_cas_32)
ALIAS(atomic_cas_ptr,_atomic_cas_32)

ALIAS(atomic_cas_32_ni,_atomic_cas_32_ni)
ALIAS(atomic_cas_uint_ni,_atomic_cas_32_ni)
ALIAS(atomic_cas_ulong_ni,_atomic_cas_32_ni)
ALIAS(atomic_cas_ptr_ni,_atomic_cas_32_ni)

#if defined(__HAVE_ATOMIC64_OPS) || defined(_KERNEL)
ALIAS(atomic_cas_64,_atomic_cas_64)
ALIAS(atomic_cas_64_ni,_atomic_cas_64)
ALIAS(__sync_val_compare_and_swap_8,_atomic_cas_64)
#endif /* __HAVE_ATOMIC64_OPS || _KERNEL */

ALIAS(membar_acquire,_membar_acquire)
ALIAS(membar_release,_membar_release)
ALIAS(membar_sync,_membar_sync)

ALIAS(membar_consumer,_membar_acquire)
ALIAS(membar_producer,_membar_release)
ALIAS(membar_enter,_membar_sync)
ALIAS(membar_exit,_membar_release)
ALIAS(membar_sync,_membar_sync)

STRONG_ALIAS(_atomic_add_int,_atomic_add_32)
STRONG_ALIAS(_atomic_add_long,_atomic_add_32)
STRONG_ALIAS(_atomic_add_ptr,_atomic_add_32)

STRONG_ALIAS(_atomic_add_int_nv,_atomic_add_32_nv)
STRONG_ALIAS(_atomic_add_long_nv,_atomic_add_32_nv)
STRONG_ALIAS(_atomic_add_ptr_nv,_atomic_add_32_nv)

STRONG_ALIAS(_atomic_and_uint,_atomic_and_32)
STRONG_ALIAS(_atomic_and_ulong,_atomic_and_32)
STRONG_ALIAS(_atomic_and_ptr,_atomic_and_32)

STRONG_ALIAS(_atomic_and_uint_nv,_atomic_and_32_nv)
STRONG_ALIAS(_atomic_and_ulong_nv,_atomic_and_32_nv)
STRONG_ALIAS(_atomic_and_ptr_nv,_atomic_and_32_nv)

STRONG_ALIAS(_atomic_dec_uint,_atomic_dec_32)
STRONG_ALIAS(_atomic_dec_ulong,_atomic_dec_32)
STRONG_ALIAS(_atomic_dec_ptr,_atomic_dec_32)

STRONG_ALIAS(_atomic_dec_uint_nv,_atomic_dec_32_nv)
STRONG_ALIAS(_atomic_dec_ulong_nv,_atomic_dec_32_nv)
STRONG_ALIAS(_atomic_dec_ptr_nv,_atomic_dec_32_nv)

STRONG_ALIAS(_atomic_inc_uint,_atomic_inc_32)
STRONG_ALIAS(_atomic_inc_ulong,_atomic_inc_32)
STRONG_ALIAS(_atomic_inc_ptr,_atomic_inc_32)

STRONG_ALIAS(_atomic_inc_uint_nv,_atomic_inc_32_nv)
STRONG_ALIAS(_atomic_inc_ulong_nv,_atomic_inc_32_nv)
STRONG_ALIAS(_atomic_inc_ptr_nv,_atomic_inc_32_nv)

STRONG_ALIAS(_atomic_or_uint,_atomic_or_32)
STRONG_ALIAS(_atomic_or_ulong,_atomic_or_32)
STRONG_ALIAS(_atomic_or_ptr,_atomic_or_32)

STRONG_ALIAS(_atomic_or_uint_nv,_atomic_or_32_nv)
STRONG_ALIAS(_atomic_or_ulong_nv,_atomic_or_32_nv)
STRONG_ALIAS(_atomic_or_ptr_nv,_atomic_or_32_nv)

STRONG_ALIAS(_atomic_swap_uint,_atomic_swap_32)
STRONG_ALIAS(_atomic_swap_ulong,_atomic_swap_32)
STRONG_ALIAS(_atomic_swap_ptr,_atomic_swap_32)

STRONG_ALIAS(_atomic_cas_uint,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ulong,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ptr,_atomic_cas_32)

STRONG_ALIAS(_atomic_cas_uint_ni,_atomic_cas_32_ni)
STRONG_ALIAS(_atomic_cas_ulong_ni,_atomic_cas_32_ni)
STRONG_ALIAS(_atomic_cas_ptr_ni,_atomic_cas_32_ni)

STRONG_ALIAS(_membar_consumer,_membar_acquire)
STRONG_ALIAS(_membar_producer,_membar_release)
STRONG_ALIAS(_membar_enter,_membar_sync)
STRONG_ALIAS(_membar_exit,_membar_release)