/*	$NetBSD: atomic_swap.S,v 1.19 2021/07/28 07:32:20 skrll Exp $	*/

/*-
 * Copyright (c) 2007,2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Matt Thomas.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "atomic_op_asm.h"

/*
 * While SWP{B} is sufficient on its own for pre-ARMv7 CPUs, on MP ARMv7
 * cores SWP{B} is disabled since it is no longer atomic among multiple
 * CPUs; executing it will actually raise an UNDEFINED exception.
 *
 * So if we use the LDREX/STREX template, but emit a SWP instruction
 * followed by a MOV instruction (using a temporary register), that gives
 * a handler for the SWP UNDEFINED exception enough information to "patch"
 * this instance of SWP with the correct LDREX/STREX forms.  (Note that
 * this works even on "read-only" pages: if the page gets tossed, we will
 * simply take another exception and patch it up again.)
 */
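
/*
 * In C terms the 32-bit swap below behaves as if it were the following
 * (a semantic sketch only, assuming the load and store execute as one
 * indivisible step; it is not how the routine is implemented):
 *
 *	uint32_t
 *	_atomic_swap_32(volatile uint32_t *ptr, uint32_t new)
 *	{
 *		uint32_t old = *ptr;
 *		*ptr = new;
 *		return old;
 *	}
 */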
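/*
 * __sync_lock_test_and_set_4 (the GCC __sync builtin) is the 32-bit
 * swap plus a memory barrier: issue a DMB, then fall through into the
 * plain swap below.
 */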
ENTRY_NP(__sync_lock_test_and_set_4)
	DMB
	/* FALLTHROUGH */

ENTRY_NP(_atomic_swap_32)
	mov	ip, r0			/* preserve ptr; r0 returns old value */
1:
#if defined(_ARM_ARCH_6)
	ldrex	r0, [ip]		/* load the old value exclusively */
	cmp	r0, r1			/* value unchanged?  skip the store */
#ifdef __thumb__
	beq	99f
	strex	r3, r1, [ip]		/* try to store the new value */
	cmp	r3, #0			/* store succeeded (r3 == 0)? */
#else
	strexne	r3, r1, [ip]		/* try to store the new value */
	cmpne	r3, #0			/* store succeeded (r3 == 0)? */
#endif
#else
	swp	r0, r1, [ip]		/* atomically swap old for new */
	cmp	r0, r1			/* filler mirroring the LDREX/STREX */
	movsne	r3, #0			/*  template so the SWP UNDEF handler */
	cmpne	r3, #0			/*  can patch this sequence in place */
#endif
	bne	1b			/* lost the reservation: retry */
99:
	RET
END(_atomic_swap_32)
END(__sync_lock_test_and_set_4)
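/*
 * Export the atomic_swap(3) names and the compiler-runtime
 * __atomic_exchange_4 entry point for this routine.
 */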
ATOMIC_OP_ALIAS(atomic_swap_32,_atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_uint,_atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_ulong,_atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_ptr,_atomic_swap_32)
CRT_ALIAS(__atomic_exchange_4,_atomic_swap_32)
STRONG_ALIAS(_atomic_swap_uint,_atomic_swap_32)
STRONG_ALIAS(_atomic_swap_ulong,_atomic_swap_32)
STRONG_ALIAS(_atomic_swap_ptr,_atomic_swap_32)

#if (!defined(_KERNEL) || !defined(_RUMPKERNEL)) && !defined(_STANDALONE)
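/*
 * __sync_lock_release stores zero with release semantics: the DMB
 * ensures every prior access is visible before the lock word is
 * cleared with a plain store.
 */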
ENTRY_NP(__sync_lock_release_4)
	mov	r1, #0
	DMB				/* drain all prior accesses */

	str	r1, [r0]		/* then clear the lock word */
	RET
END(__sync_lock_release_4)
#endif
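/*
 * Byte-sized variant of __sync_lock_test_and_set: a DMB, then fall
 * through into the plain 8-bit swap below.
 */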
ENTRY_NP(__sync_lock_test_and_set_1)
	DMB
	/* FALLTHROUGH */

ENTRY_NP(_atomic_swap_8)
	mov	ip, r0			/* preserve ptr; r0 returns old value */
1:
#if defined(_ARM_ARCH_6)
	ldrexb	r0, [ip]		/* load the old value exclusively */
	strexb	r3, r1, [ip]		/* try to store the new value */
#else
	swpb	r0, r1, [ip]		/* atomically swap old for new */
	mov	r3, #0			/* SWPB cannot fail */
#endif
	cmp	r3, #0			/* store succeeded (r3 == 0)? */
	bne	1b			/* lost the reservation: retry */
	RET
END(_atomic_swap_8)
END(__sync_lock_test_and_set_1)

ATOMIC_OP_ALIAS(atomic_swap_8,_atomic_swap_8)
ATOMIC_OP_ALIAS(atomic_swap_char,_atomic_swap_8)
ATOMIC_OP_ALIAS(atomic_swap_uchar,_atomic_swap_8)
CRT_ALIAS(__atomic_exchange_1,_atomic_swap_8)
STRONG_ALIAS(_atomic_swap_char,_atomic_swap_8)
STRONG_ALIAS(_atomic_swap_uchar,_atomic_swap_8)

#if (!defined(_KERNEL) || !defined(_RUMPKERNEL)) && !defined(_STANDALONE)
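/*
 * Byte-sized variant of __sync_lock_release: a DMB, then a plain
 * byte store of zero.
 */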
ENTRY_NP(__sync_lock_release_1)
	mov	r1, #0
	DMB				/* drain all prior accesses */

	strb	r1, [r0]		/* then clear the lock byte */
	RET
END(__sync_lock_release_1)
#endif