/* $NetBSD: lock.h,v 1.33 2022/02/13 13:42:30 riastradh Exp $ */

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */

#ifndef _ALPHA_LOCK_H_
#define	_ALPHA_LOCK_H_

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#endif

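/*
 * Non-atomic accessors for the lock word.  These let callers inspect
 * or force lock state (e.g. for assertions and lock debugging) with
 * plain loads and stores, without going through the load-locked/
 * store-conditional protocol used by the operations further below.
 */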
static __inline int
__SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{

	*alp = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	unsigned long t0;

	/*
	 * Note that if we detect the lock is held when we do the
	 * initial load-locked, we spin using a non-locked load to
	 * save the coherency logic some work.
	 */

	__asm volatile(
		"# BEGIN __cpu_simple_lock\n"
		"1:	ldl_l	%0, %3		# load the lock word, locked\n"
		"	bne	%0, 2f		# lock held; go spin below\n"
		"	bis	$31, %2, %0	# t0 = __SIMPLELOCK_LOCKED\n"
		"	stl_c	%0, %1		# try to take the lock\n"
		"	beq	%0, 3f		# stl_c failed; retry\n"
		"	mb			# acquire barrier\n"
		"	br	4f		\n"
		"2:	ldl	%0, %3		# spin with ordinary loads\n"
		"	beq	%0, 1b		# looks free; try to take it\n"
		"	br	2b		\n"
		"3:	br	1b		\n"
		"4:				\n"
		"	# END __cpu_simple_lock\n"
		: "=&r" (t0), "=m" (*alp)
		: "i" (__SIMPLELOCK_LOCKED), "m" (*alp)
		: "cc", "memory");
}
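
/*
 * Typical usage, as an illustrative sketch only (`example_lock' is a
 * hypothetical variable, not a real kernel symbol; __cpu_simple_unlock()
 * is defined below):
 *
 *	static __cpu_simple_lock_t example_lock;
 *
 *	__cpu_simple_lock_init(&example_lock);
 *	__cpu_simple_lock(&example_lock);
 *	... critical section ...
 *	__cpu_simple_unlock(&example_lock);
 */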

static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	unsigned long t0, v0;

	__asm volatile(
		"# BEGIN __cpu_simple_lock_try\n"
		"1:	ldl_l	%0, %4		# load the lock word, locked\n"
		"	bne	%0, 2f		# lock held; fail\n"
		"	bis	$31, %3, %0	# t0 = __SIMPLELOCK_LOCKED\n"
		"	stl_c	%0, %2		# try to take the lock\n"
		"	beq	%0, 3f		# stl_c failed; retry\n"
		"	mb			# acquire barrier\n"
		"	bis	$31, 1, %1	# v0 = 1 (success)\n"
		"	br	4f		\n"
		"2:	bis	$31, $31, %1	# v0 = 0 (failure)\n"
		"	br	4f		\n"
		"3:	br	1b		\n"
		"4:				\n"
		"	# END __cpu_simple_lock_try"
		: "=&r" (t0), "=r" (v0), "=m" (*alp)
		: "i" (__SIMPLELOCK_LOCKED), "m" (*alp)
		: "cc", "memory");

	return (v0 != 0);
}
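
/*
 * Illustrative sketch: __cpu_simple_lock_try() returns non-zero if it
 * acquired the lock and zero if the lock was already held, so a caller
 * that must not spin can do, e.g.:
 *
 *	if (__cpu_simple_lock_try(&example_lock)) {
 *		... got the lock ...
 *		__cpu_simple_unlock(&example_lock);
 *	}
 */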

static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{

	__asm volatile(
		"# BEGIN __cpu_simple_unlock\n"
		"	mb			# release barrier\n"
		"	stl	$31, %0		# *alp = __SIMPLELOCK_UNLOCKED\n"
		"	# END __cpu_simple_unlock"
		: "=m" (*alp)
		: /* no inputs */
		: "memory");
}
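
/*
 * Note that the mb in __cpu_simple_unlock() orders the critical
 * section's loads and stores before the store that releases the lock,
 * pairing with the mb issued after a successful stl_c in the acquire
 * paths above.
 */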

#if defined(MULTIPROCESSOR)
/*
 * On the Alpha, interprocessor interrupts come in at device priority
 * level (ALPHA_PSL_IPL_CLOCK).  This is a problem when spinning for a
 * lock at an equal or higher priority level (like the spin mutexes
 * used by the scheduler): incoming IPIs will not be processed, which
 * can lead to deadlock.
 *
 * This hook allows IPIs to be processed while spinning.  Note that we
 * only take the special path if IPIs are blocked (current IPL >=
 * IPL_CLOCK); otherwise IPIs are processed in the normal fashion.
 * Checking the IPL this way also ensures that preemption is disabled
 * (i.e. curcpu() is stable).
 */
#define	SPINLOCK_SPIN_HOOK						\
do {									\
	unsigned long _ipl_ = alpha_pal_rdps() & ALPHA_PSL_IPL_MASK;	\
									\
	if (_ipl_ >= ALPHA_PSL_IPL_CLOCK) {				\
		struct cpu_info *__ci = curcpu();			\
		if (atomic_load_relaxed(&__ci->ci_ipis) != 0) {		\
			alpha_ipi_process(__ci, NULL);			\
		}							\
	}								\
} while (/*CONSTCOND*/0)
#define	SPINLOCK_BACKOFF_HOOK	(void)nullop((void *)0)
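
/*
 * Illustrative sketch (hypothetical, for exposition only) of how a
 * machine-independent spin loop might invoke these hooks while
 * waiting for a lock:
 *
 *	while (!__cpu_simple_lock_try(&example_lock)) {
 *		SPINLOCK_BACKOFF_HOOK;
 *		SPINLOCK_SPIN_HOOK;
 *	}
 */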
#endif /* MULTIPROCESSOR */

#endif /* _ALPHA_LOCK_H_ */