/*	$NetBSD: atomic_swap.S,v 1.11 2014/06/28 20:18:55 joerg Exp $	*/

/*-
 * Copyright (c) 2007,2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "atomic_op_asm.h"

/*
 * While SWP{B} is sufficient on its own for pre-ARMv7 CPUs, on MP ARMv7
 * cores SWP{B} is disabled since it is no longer atomic across multiple
 * CPUs; executing it will actually raise an UNDEFINED exception.
 *
 * So we lay the code out using the LDREX/STREX template, but on CPUs
 * without LDREX/STREX we substitute a SWP instruction followed by a MOV
 * instruction (using a temporary register).  That gives the handler for
 * the SWP UNDEFINED exception enough information to "patch" this instance
 * of SWP with the correct forms of LDREX/STREX.  (Note that this patching
 * would happen even on "read-only" pages.  If the page gets tossed, we
 * will take another exception and fix it up yet again.)
 */
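/*
 * A C-level sketch of the retry loop that _atomic_swap_32 below implements
 * (illustrative only, not part of the build; store_exclusive() is a
 * hypothetical stand-in for the STREX instruction and returns true only
 * when the exclusive store succeeds):
 *
 *	uint32_t
 *	atomic_swap_32_sketch(volatile uint32_t *p, uint32_t newval)
 *	{
 *		uint32_t old;
 *
 *		do {
 *			old = *p;	// LDREX: load old value exclusively
 *			// if the word already holds newval, skip the
 *			// store entirely, mirroring the cmp/strexne below
 *		} while (old != newval && !store_exclusive(p, newval));
 *		// a data memory barrier follows, giving acquire semantics
 *		return old;
 *	}
 */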
ENTRY_NP(_atomic_swap_32)
	mov	ip, r0
1:
#ifdef _ARM_ARCH_6
	ldrex	r0, [ip]		/* load the old value exclusively */
	cmp	r0, r1			/* already equal to the new value? */
#ifdef __thumb__
	beq	99f			/* yes, skip the store (no IT block) */
	strex	r3, r1, [ip]
	cmp	r3, #0
#else
	strexne	r3, r1, [ip]		/* try to store the new value */
	cmpne	r3, #0			/* did the exclusive store fail? */
#endif
#else
	swp	r0, r1, [ip]
	cmp	r0, r1
	movsne	r3, #0			/* the dummy MOV for SWP patching */
	cmpne	r3, #0
#endif
	bne	1b			/* lost the reservation, retry */
#ifdef _ARM_ARCH_7
	dmb
#else
	mcr	p15, 0, r3, c7, c10, 5	/* data memory barrier */
#endif
99:
	RET
END(_atomic_swap_32)

ATOMIC_OP_ALIAS(atomic_swap_32,_atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_uint,_atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_ulong,_atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_ptr,_atomic_swap_32)
CRT_ALIAS(__sync_lock_test_and_set_4,_atomic_swap_32)
CRT_ALIAS(__atomic_exchange_4,_atomic_swap_32)
STRONG_ALIAS(_atomic_swap_uint,_atomic_swap_32)
STRONG_ALIAS(_atomic_swap_ulong,_atomic_swap_32)
STRONG_ALIAS(_atomic_swap_ptr,_atomic_swap_32)

#if (!defined(_KERNEL) || defined(_RUMPKERNEL)) && !defined(_STANDALONE)
ENTRY_NP(__sync_lock_release_4)
	mov	r1, #0
#ifdef _ARM_ARCH_7
	dmb				/* release: order prior accesses */
#else
	mcr	p15, 0, r1, c7, c10, 5	/* data memory barrier */
#endif
	str	r1, [r0]		/* clear the whole 32-bit lock word */
	RET
END(__sync_lock_release_4)
#endif

ENTRY_NP(_atomic_swap_8)
	mov	ip, r0
1:
#ifdef _ARM_ARCH_6
	ldrexb	r0, [ip]		/* load the old byte exclusively */
	strexb	r3, r1, [ip]		/* try to store the new byte */
#else
	swpb	r0, r1, [ip]
	mov	r3, #0			/* SWPB cannot fail */
#endif
	cmp	r3, #0
	bne	1b			/* lost the reservation, retry */
#ifdef _ARM_ARCH_7
	dmb
#else
	mcr	p15, 0, ip, c7, c10, 5	/* data memory barrier */
#endif
	RET
END(_atomic_swap_8)

ATOMIC_OP_ALIAS(atomic_swap_8,_atomic_swap_8)
ATOMIC_OP_ALIAS(atomic_swap_char,_atomic_swap_8)
ATOMIC_OP_ALIAS(atomic_swap_uchar,_atomic_swap_8)
CRT_ALIAS(__sync_lock_test_and_set_1,_atomic_swap_8)
CRT_ALIAS(__atomic_exchange_1,_atomic_swap_8)
STRONG_ALIAS(_atomic_swap_char,_atomic_swap_8)
STRONG_ALIAS(_atomic_swap_uchar,_atomic_swap_8)

#if (!defined(_KERNEL) || defined(_RUMPKERNEL)) && !defined(_STANDALONE)
ENTRY_NP(__sync_lock_release_1)
	mov	r1, #0
#ifdef _ARM_ARCH_7
	dmb				/* release: order prior accesses */
#else
	mcr	p15, 0, r1, c7, c10, 5	/* data memory barrier */
#endif
	strb	r1, [r0]
	RET
END(__sync_lock_release_1)
#endif
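/*
 * A usage sketch (illustrative only, not part of the build; spin_lock and
 * spin_unlock are hypothetical names): via the CRT_ALIAS bindings above,
 * the compiler's __sync_* builtins resolve to these routines, so a minimal
 * spinlock over a 32-bit word looks like:
 *
 *	static volatile unsigned int lock;
 *
 *	static void
 *	spin_lock(void)
 *	{
 *		// __sync_lock_test_and_set returns the previous value;
 *		// 0 means the lock was free and is now ours (acquire)
 *		while (__sync_lock_test_and_set(&lock, 1) != 0)
 *			continue;
 *	}
 *
 *	static void
 *	spin_unlock(void)
 *	{
 *		__sync_lock_release(&lock);	// store 0, release ordering
 *	}
 */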