/* $NetBSD: lock.h,v 1.25 2007/09/10 11:34:07 skrll Exp $ */

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */

#ifndef _ALPHA_LOCK_H_
#define	_ALPHA_LOCK_H_

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#endif

51static __inline int
52__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
53{
54	return *__ptr == __SIMPLELOCK_LOCKED;
55}
56
57static __inline int
58__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
59{
60	return *__ptr == __SIMPLELOCK_UNLOCKED;
61}
62
63static __inline void
64__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
65{
66	*__ptr = __SIMPLELOCK_UNLOCKED;
67}
68
69static __inline void
70__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
71{
72	*__ptr = __SIMPLELOCK_LOCKED;
73}
74
/*
 * Initialize a simple lock to the unlocked state.
 *
 * Stores zero into the lock word ($31 is the Alpha zero register) and
 * issues an "mb" so the initialization is visible to other processors
 * before the lock is first used.  NOTE(review): this assumes
 * __SIMPLELOCK_UNLOCKED is 0 -- consistent with __cpu_simple_unlock()
 * below, but confirm against the MD type headers.
 */
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__asm volatile(
		"# BEGIN __cpu_simple_lock_init\n"
		"	stl	$31, %0		\n"
		"	mb			\n"
		"	# END __cpu_simple_lock_init"
		: "=m" (*alp));
}

/*
 * Acquire a simple lock, spinning until it becomes available.
 *
 * Uses the Alpha load-locked/store-conditional sequence:
 *   1: ldl_l the lock word; if it is non-zero (held) branch to the
 *      spin loop at 2.  Otherwise try to stl_c __SIMPLELOCK_LOCKED
 *      into it; if the conditional store fails (reservation lost),
 *      retry from 1 via 3.  On success, the "mb" orders all later
 *      memory accesses after the acquisition (acquire semantics).
 *   2: spin with ordinary (non-locked) loads until the word reads
 *      zero, then go back to 1 and retry the locked sequence.
 */
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	unsigned long t0;

	/*
	 * Note, if we detect that the lock is held when
	 * we do the initial load-locked, we spin using
	 * a non-locked load to save the coherency logic
	 * some work.
	 */

	__asm volatile(
		"# BEGIN __cpu_simple_lock\n"
		"1:	ldl_l	%0, %3		\n"
		"	bne	%0, 2f		\n"
		"	bis	$31, %2, %0	\n"
		"	stl_c	%0, %1		\n"
		"	beq	%0, 3f		\n"
		"	mb			\n"
		"	br	4f		\n"
		"2:	ldl	%0, %3		\n"
		"	beq	%0, 1b		\n"
		"	br	2b		\n"
		"3:	br	1b		\n"
		"4:				\n"
		"	# END __cpu_simple_lock\n"
		: "=&r" (t0), "=m" (*alp)
		: "i" (__SIMPLELOCK_LOCKED), "m" (*alp)
		: "memory");
}

/*
 * Try once to acquire a simple lock without spinning.
 *
 * Same ldl_l/stl_c sequence as __cpu_simple_lock(), except that if
 * the lock is observed held we give up immediately (branch to 2,
 * v0 = 0) instead of spinning.  A failed conditional store (branch
 * to 3) is retried, since it indicates a lost reservation rather
 * than a held lock.  On success the "mb" provides acquire semantics
 * and v0 is set to 1.
 *
 * Returns non-zero if the lock was acquired, zero if it was held.
 */
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	unsigned long t0, v0;

	__asm volatile(
		"# BEGIN __cpu_simple_lock_try\n"
		"1:	ldl_l	%0, %4		\n"
		"	bne	%0, 2f		\n"
		"	bis	$31, %3, %0	\n"
		"	stl_c	%0, %2		\n"
		"	beq	%0, 3f		\n"
		"	mb			\n"
		"	bis	$31, 1, %1	\n"
		"	br	4f		\n"
		"2:	bis	$31, $31, %1	\n"
		"	br	4f		\n"
		"3:	br	1b		\n"
		"4:				\n"
		"	# END __cpu_simple_lock_try"
		: "=&r" (t0), "=r" (v0), "=m" (*alp)
		: "i" (__SIMPLELOCK_LOCKED), "m" (*alp)
		: "memory");

	return (v0 != 0);
}

/*
 * Release a simple lock.
 *
 * The "mb" before the store gives release semantics: every memory
 * access made inside the critical section is globally visible before
 * the lock word is cleared (stl of $31 stores zero).
 */
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{

	__asm volatile(
		"# BEGIN __cpu_simple_unlock\n"
		"	mb			\n"
		"	stl	$31, %0		\n"
		"	# END __cpu_simple_unlock"
		: "=m" (*alp));
}

#if defined(MULTIPROCESSOR)
/*
 * On the Alpha, interprocessor interrupts come in at device priority
 * level.  This can cause some problems while waiting for r/w spinlocks
 * from a high'ish priority level: IPIs that come in will not be processed.
 * This can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.
 */
#define	SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info *__ci = curcpu();				\
	int __s;							\
									\
	if (__ci->ci_ipis != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
		    __ci->ci_cpuid); */					\
		__s = splipi();						\
		alpha_ipi_process(__ci, NULL);				\
		splx(__s);						\
	}								\
} while (0)
/*
 * Backoff hook: calls nullop(), i.e. deliberately does nothing here.
 * NOTE(review): presumably a placeholder for an MD backoff action --
 * confirm against the MI spinlock backoff code that invokes it.
 */
#define	SPINLOCK_BACKOFF_HOOK	(void)nullop((void *)0)
#endif /* MULTIPROCESSOR */

/*
 * Read memory barrier.  Implemented with the full "mb" instruction;
 * the compiler is also prevented from reordering memory accesses
 * across it by the "memory" clobber.
 */
static __inline void
mb_read(void)
{
	__asm __volatile("mb" : : : "memory");
}

/*
 * Write memory barrier.  Uses the full "mb" even though, per the
 * existing XXX note, a write-only barrier ("wmb") might suffice --
 * the stronger barrier is conservatively correct.
 */
static __inline void
mb_write(void)
{
	/* XXX wmb */
	__asm __volatile("mb" : : : "memory");
}

/*
 * Full memory barrier: orders all prior loads and stores before all
 * subsequent ones, with a matching compiler barrier ("memory" clobber).
 */
static __inline void
mb_memory(void)
{
	__asm __volatile("mb" : : : "memory");
}

#endif /* _ALPHA_LOCK_H_ */