/* $NetBSD: lock.h,v 1.25 2007/09/10 11:34:07 skrll Exp $ */

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */

#ifndef _ALPHA_LOCK_H_
#define	_ALPHA_LOCK_H_

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#endif

/*
 * Simple accessors for the lock word: test its state, or set it
 * directly with a plain store (no memory barrier).
 */
static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

/*
 * Initialize the lock word to the unlocked state: store zero ($31 is
 * the always-zero register), followed by a memory barrier.
 */
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__asm volatile(
		"# BEGIN __cpu_simple_lock_init\n"
		"	stl	$31, %0		\n"
		"	mb			\n"
		"	# END __cpu_simple_lock_init"
		: "=m" (*alp));
}

static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	unsigned long t0;

	/*
	 * Note: if the initial load-locked finds the lock held, we
	 * spin using a regular (non-locked) load, which saves the
	 * cache coherency logic some work.
	 */

	__asm volatile(
		"# BEGIN __cpu_simple_lock\n"
		"1:	ldl_l	%0, %3		\n"
		"	bne	%0, 2f		\n"
		"	bis	$31, %2, %0	\n"
		"	stl_c	%0, %1		\n"
		"	beq	%0, 3f		\n"
		"	mb			\n"
		"	br	4f		\n"
		"2:	ldl	%0, %3		\n"
		"	beq	%0, 1b		\n"
		"	br	2b		\n"
		"3:	br	1b		\n"
		"4:				\n"
		"	# END __cpu_simple_lock\n"
		: "=&r" (t0), "=m" (*alp)
		: "i" (__SIMPLELOCK_LOCKED), "m" (*alp)
		: "memory");
}
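
/*
 * For reference, a C rendering of the acquire loop above.  This is an
 * illustrative sketch only (kept out of compilation): it uses the GCC
 * __atomic builtins as a stand-in for the ldl_l/stl_c/mb sequence,
 * which the real routine must of course emit by hand, since it is the
 * primitive such builtins would expand to.
 */
#if 0
static __inline void
__cpu_simple_lock_c_sketch(__cpu_simple_lock_t *alp)
{
	__cpu_simple_lock_t __old;

	for (;;) {
		__old = __SIMPLELOCK_UNLOCKED;
		/* Try UNLOCKED -> LOCKED; acquire ordering on success. */
		if (__atomic_compare_exchange_n(alp, &__old,
		    __SIMPLELOCK_LOCKED, 0 /* strong */,
		    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
			return;
		/* Lock was held: spin with plain loads to spare the bus. */
		while (__atomic_load_n(alp, __ATOMIC_RELAXED) ==
		    __SIMPLELOCK_LOCKED)
			continue;
	}
}
#endif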

static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	unsigned long t0, v0;

	__asm volatile(
		"# BEGIN __cpu_simple_lock_try\n"
		"1:	ldl_l	%0, %4		\n"
		"	bne	%0, 2f		\n"
		"	bis	$31, %3, %0	\n"
		"	stl_c	%0, %2		\n"
		"	beq	%0, 3f		\n"
		"	mb			\n"
		"	bis	$31, 1, %1	\n"
		"	br	4f		\n"
		"2:	bis	$31, $31, %1	\n"
		"	br	4f		\n"
		"3:	br	1b		\n"
		"4:				\n"
		"	# END __cpu_simple_lock_try"
		: "=&r" (t0), "=r" (v0), "=m" (*alp)
		: "i" (__SIMPLELOCK_LOCKED), "m" (*alp)
		: "memory");

	return (v0 != 0);
}

/*
 * Release the lock.  The barrier ahead of the store ensures that all
 * memory accesses made inside the critical section are visible to
 * other processors before the lock word reads as free.
 */
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{

	__asm volatile(
		"# BEGIN __cpu_simple_unlock\n"
		"	mb			\n"
		"	stl	$31, %0		\n"
		"	# END __cpu_simple_unlock"
		: "=m" (*alp));
}
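
/*
 * Hypothetical caller (not part of this header), showing the usual
 * reason to prefer __cpu_simple_lock_try(): taking a second lock
 * while one is already held, and backing out instead of deadlocking.
 */
#if 0
static __inline int
__example_lock_pair(__cpu_simple_lock_t *a, __cpu_simple_lock_t *b)
{

	__cpu_simple_lock(a);
	if (__cpu_simple_lock_try(b) == 0) {
		/* Could not get b; drop a so other CPUs can progress. */
		__cpu_simple_unlock(a);
		return 0;
	}
	return 1;	/* caller now holds both a and b */
}
#endif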

#if defined(MULTIPROCESSOR)
/*
 * On the Alpha, interprocessor interrupts come in at device priority
 * level.  This can cause problems while waiting for r/w spinlocks from
 * a fairly high priority level: IPIs that come in will not be
 * processed.  This can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.
 */
#define	SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info *__ci = curcpu();				\
	int __s;							\
									\
	if (__ci->ci_ipis != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
		    __ci->ci_cpuid); */					\
		__s = splipi();						\
		alpha_ipi_process(__ci, NULL);				\
		splx(__s);						\
	}								\
} while (0)
#define	SPINLOCK_BACKOFF_HOOK	(void)nullop((void *)0)
#endif /* MULTIPROCESSOR */
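
/*
 * Hypothetical consumer of the hook above (the real callers live in
 * the machine-independent lock code): a spin-wait loop watches the
 * lock with plain loads and lets pending IPIs run on every pass, so
 * the deadlock described above cannot occur.
 */
#if 0
static __inline void
__example_spin_wait(__cpu_simple_lock_t *alp)
{

	while (__cpu_simple_lock_try(alp) == 0) {
		while (__SIMPLELOCK_LOCKED_P(alp))
			SPINLOCK_SPIN_HOOK;
	}
}
#endif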

/*
 * Memory barrier primitives.  The Alpha provides only the full "mb"
 * and the write-only "wmb" barriers, so the read and full variants
 * are the same instruction; mb_write() could use "wmb" (see the XXX)
 * but conservatively issues a full barrier as well.
 */
static __inline void
mb_read(void)
{
	__asm __volatile("mb" : : : "memory");
}

static __inline void
mb_write(void)
{
	/* XXX wmb */
	__asm __volatile("mb" : : : "memory");
}

static __inline void
mb_memory(void)
{
	__asm __volatile("mb" : : : "memory");
}
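
/*
 * Illustrative producer/consumer pair (hypothetical, not part of this
 * header) showing what the barriers are for: mb_write() keeps the data
 * store ahead of the flag store, and mb_read() keeps the flag load
 * ahead of the data load, which matters on the Alpha's weakly ordered
 * memory system.
 */
#if 0
static volatile int __example_data, __example_ready;

static __inline void
__example_produce(int __v)
{

	__example_data = __v;
	mb_write();		/* data visible before the flag */
	__example_ready = 1;
}

static __inline int
__example_consume(void)
{

	while (__example_ready == 0)
		continue;
	mb_read();		/* flag seen before data is read */
	return __example_data;
}
#endif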

#endif /* _ALPHA_LOCK_H_ */