/*	$NetBSD: lock_stubs.S,v 1.4 2007/08/29 23:38:02 ad Exp $	*/

/*-
 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * AMD64 lock stubs.  Calling convention:
 *
 * %rdi		arg 1
 * %rsi		arg 2
 * %rdx		arg 3
 * %rax		return value
 */

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"

#include <machine/asm.h>
#include <machine/intrdefs.h>

#include "assym.h"

#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
#define	FULL
#endif

#if defined(MULTIPROCESSOR)
#define	LOCK		lock
#else
#define	LOCK		/* nothing */
#endif

#define	END(name,a)	.align	a; LABEL(name)

#ifndef LOCKDEBUG

/*
 * void mutex_enter(kmutex_t *mtx);
 *
 * Acquire a mutex and post a load fence.
 */
	.align	64

NENTRY(mutex_enter)				/* 0x0000, 25 bytes */
	movq	CPUVAR(CURLWP), %rcx
	xorq	%rax, %rax
	LOCK
	cmpxchgq %rcx, MTX_OWNER(%rdi)
	jnz,pn	_C_LABEL(mutex_vector_enter)
	ret
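
/*
 * Roughly equivalent C for the fast path above, as a hedged sketch only:
 * cas_ptr() is a stand-in for the locked cmpxchgq, and mtx_owner is an
 * illustrative name for the owner word addressed via MTX_OWNER.
 *
 *	void
 *	mutex_enter(kmutex_t *mtx)
 *	{
 *		// Swing the owner word from 0 (unowned) to curlwp; any
 *		// other initial value means contention, so punt to the
 *		// slow path in C.
 *		if (cas_ptr(&mtx->mtx_owner, 0, (uintptr_t)curlwp) != 0)
 *			mutex_vector_enter(mtx);
 *	}
 */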

/*
 * void mutex_exit(kmutex_t *mtx);
 *
 * Release a mutex and post a load fence.
 *
 * See comments in mutex_vector_enter() about doing this operation unlocked
 * on multiprocessor systems, and comments in arch/x86/include/lock.h about
 * memory ordering on Intel x86 systems.
 */
NENTRY(mutex_exit)				/* 0x0020, 24 bytes */
	movq	CPUVAR(CURLWP), %rax
	xorq	%rdx, %rdx
	cmpxchgq %rdx, MTX_OWNER(%rdi)
	jnz,pn	_C_LABEL(mutex_vector_exit)
	ret
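
/*
 * Hedged C sketch of the fast path above.  cas_ptr() again stands in for
 * the cmpxchgq; note that the stub intentionally omits the LOCK prefix,
 * per the comment above and mutex_vector_enter().
 *
 *	void
 *	mutex_exit(kmutex_t *mtx)
 *	{
 *		// Swing the owner word from curlwp back to 0.  If it no
 *		// longer holds exactly curlwp (e.g. waiters are recorded),
 *		// take the slow path.
 *		if (cas_ptr(&mtx->mtx_owner, (uintptr_t)curlwp, 0) !=
 *		    (uintptr_t)curlwp)
 *			mutex_vector_exit(mtx);
 *	}
 */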

/*
 * void mutex_spin_enter(kmutex_t *mtx);
 *
 * Acquire a spin mutex and post a load fence.
 */
NENTRY(mutex_spin_enter)
	movq	CPUVAR(SELF), %r8
#if defined(FULL)
	movl	$0x0100, %eax			/* new + expected value */
#endif
	movl	CPU_INFO_ILEVEL(%r8), %esi
	subl	$1, CPU_INFO_MTX_COUNT(%r8)	/* decl doesn't set CF */
	movzbl	MTX_IPL(%rdi), %ecx		/* new SPL */
	cmovncl	CPU_INFO_MTX_OLDSPL(%r8), %esi
	cmpl	%ecx, %esi			/* higher? */
	movl	%esi, CPU_INFO_MTX_OLDSPL(%r8)
	cmovgl	%esi, %ecx
	movl	%ecx, CPU_INFO_ILEVEL(%r8)	/* splraiseipl() */
#if defined(FULL)
	LOCK
	cmpxchgb %ah, MTX_LOCK(%rdi)		/* lock */
	jnz,pn	_C_LABEL(mutex_spin_retry)	/* failed; hard case */
#endif
	ret
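
/*
 * Hedged C sketch of the logic above.  The field and helper names
 * (mtx_ipl, mtx_lock, ci_mtx_count, ci_mtx_oldspl, try_lock_byte())
 * are illustrative, not the real structure layout.
 *
 *	void
 *	mutex_spin_enter(kmutex_t *mtx)
 *	{
 *		struct cpu_info *ci = curcpu();
 *		int s = splraiseipl(mtx->mtx_ipl);
 *
 *		// Record the SPL to restore only for the outermost spin
 *		// mutex held by this CPU (ci_mtx_count runs negative).
 *		if (ci->ci_mtx_count-- == 0)
 *			ci->ci_mtx_oldspl = s;
 *	#if defined(FULL)
 *		// try_lock_byte() corresponds to the locked cmpxchgb.
 *		if (!try_lock_byte(&mtx->mtx_lock))
 *			mutex_spin_retry(mtx);
 *	#endif
 *	}
 */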

/*
 * void mutex_spin_exit(kmutex_t *mtx);
 *
 * Release a spin mutex and post a load fence.
 */
NENTRY(mutex_spin_exit)
#ifdef DIAGNOSTIC

	movl	$0x0001, %eax			/* new + expected value */
	movq	CPUVAR(SELF), %r8
	cmpxchgb %ah, MTX_LOCK(%rdi)		/* unlock */
	jnz,pn	_C_LABEL(mutex_vector_exit)	/* hard case if problems */
	movl	CPU_INFO_MTX_OLDSPL(%r8), %edi
	incl	CPU_INFO_MTX_COUNT(%r8)
	jnz	1f
	cmpl	CPU_INFO_ILEVEL(%r8), %edi
	jae	1f
	movl	CPU_INFO_IUNMASK(%r8,%rdi,4), %esi
	cli
	testl	CPU_INFO_IPENDING(%r8), %esi
	jnz	_C_LABEL(Xspllower)
	movl	%edi, CPU_INFO_ILEVEL(%r8)
	sti
1:	rep					/* double byte ret as branch */
	ret					/* target: see AMD docs */

#else	/* DIAGNOSTIC */

	movq	CPUVAR(SELF), %rsi
#ifdef MULTIPROCESSOR
	movb	$0x00, MTX_LOCK(%rdi)
#endif
	movl	CPU_INFO_MTX_OLDSPL(%rsi), %ecx
	incl	CPU_INFO_MTX_COUNT(%rsi)
	movl	CPU_INFO_ILEVEL(%rsi),%edx
	cmovnzl	%edx,%ecx
	cmpl	%edx,%ecx			/* new level is lower? */
	pushq	%rbx
	jae,pn	2f
1:
	movl	CPU_INFO_IPENDING(%rsi),%eax
	testl	%eax,CPU_INFO_IUNMASK(%rsi,%rcx,4)/* deferred interrupts? */
	movl	%eax,%ebx
	jnz,pn	3f
	cmpxchg8b CPU_INFO_ISTATE(%rsi)		/* swap in new ilevel */
	jnz,pn	1b
2:
	popq	%rbx
	ret
3:
	popq	%rbx
	movl	%ecx, %edi
	jmp	_C_LABEL(Xspllower)

#endif	/* DIAGNOSTIC */
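
/*
 * Hedged C sketch covering both variants above (names illustrative).
 * The DIAGNOSTIC build additionally verifies with cmpxchgb that the lock
 * byte really was held and punts to mutex_vector_exit() if not.
 *
 *	void
 *	mutex_spin_exit(kmutex_t *mtx)
 *	{
 *		struct cpu_info *ci = curcpu();
 *		int s = ci->ci_mtx_oldspl;
 *
 *		unlock_byte(&mtx->mtx_lock);	// plain store of zero (MP only)
 *		// Restore the saved SPL only when the outermost spin mutex
 *		// is released; lowering the IPL may hand off to Xspllower
 *		// if interrupts were deferred while it was raised.
 *		if (++ci->ci_mtx_count == 0)
 *			splx(s);
 *	}
 */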

/*
 * void	rw_enter(krwlock_t *rwl, krw_t op);
 *
 * Acquire one hold on a RW lock.
 */
NENTRY(rw_enter)				/* 0x00c0, 62 bytes */
	cmpl	$RW_READER, %esi
	jne	2f

	/*
	 * Reader: this is the most common case.
	 */
1:	movq	RW_OWNER(%rdi), %rax
	testb	$(RW_WRITE_LOCKED|RW_WRITE_WANTED), %al
	leaq	RW_READ_INCR(%rax), %rdx
	jnz,pn	_C_LABEL(rw_vector_enter)
	LOCK
	cmpxchgq %rdx, RW_OWNER(%rdi)
	jnz,pn	1b
	ret

	/*
	 * Writer: if the compare-and-set fails, don't bother retrying.
	 */
2:	movq	CPUVAR(CURLWP), %rcx
	xorq	%rax, %rax
	orq	$RW_WRITE_LOCKED, %rcx
	LOCK
	cmpxchgq %rcx, RW_OWNER(%rdi)
	jnz,pn	_C_LABEL(rw_vector_enter)
	ret
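
/*
 * Hedged C sketch of both fast paths above.  cas_ptr() stands in for the
 * locked cmpxchgq and rw_owner is an illustrative name for the combined
 * owner/count word addressed via RW_OWNER.
 *
 *	void
 *	rw_enter(krwlock_t *rwl, krw_t op)
 *	{
 *		uintptr_t owner, next;
 *
 *		if (op == RW_READER) {
 *			// Add one reader unless a writer holds or wants
 *			// the lock; retry if the word changed underfoot.
 *			do {
 *				owner = rwl->rw_owner;
 *				if (owner & (RW_WRITE_LOCKED|RW_WRITE_WANTED)) {
 *					rw_vector_enter(rwl, op);
 *					return;
 *				}
 *				next = owner + RW_READ_INCR;
 *			} while (cas_ptr(&rwl->rw_owner, owner, next) != owner);
 *		} else {
 *			// Writer: take the lock only if it is completely free.
 *			next = (uintptr_t)curlwp | RW_WRITE_LOCKED;
 *			if (cas_ptr(&rwl->rw_owner, 0, next) != 0)
 *				rw_vector_enter(rwl, op);
 *		}
 *	}
 */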

/*
 * void	rw_exit(krwlock_t *rwl);
 *
 * Release one hold on a RW lock.
 */
NENTRY(rw_exit)					/* 0x0100, 64 bytes */
	movq	RW_OWNER(%rdi), %rax
	testb	$RW_WRITE_LOCKED, %al
	jnz	2f

	/*
	 * Reader
	 */
1:	testb	$RW_HAS_WAITERS, %al
	jnz,pn	3f
	cmpq	$RW_READ_INCR, %rax
	leaq	-RW_READ_INCR(%rax), %rdx
	jb,pn	3f
	LOCK
	cmpxchgq %rdx, RW_OWNER(%rdi)
	jnz,pn	1b
	ret

	/*
	 * Writer
	 */
2:	leaq	-RW_WRITE_LOCKED(%rax), %rdx
	subq	CPUVAR(CURLWP), %rdx
	jnz,pn	3f
	LOCK
	cmpxchgq %rdx, RW_OWNER(%rdi)
	jnz	3f
	ret

3:	jmp	_C_LABEL(rw_vector_exit)
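
/*
 * Hedged C sketch of the fast paths above, using the same illustrative
 * names as for rw_enter(); anything unusual falls through to
 * rw_vector_exit().
 *
 *	void
 *	rw_exit(krwlock_t *rwl)
 *	{
 *		uintptr_t owner, next;
 *
 *		owner = rwl->rw_owner;
 *		if (owner & RW_WRITE_LOCKED) {
 *			// Writer: release only if we own it outright with
 *			// no waiters recorded, and do not retry on failure.
 *			if (owner != ((uintptr_t)curlwp | RW_WRITE_LOCKED) ||
 *			    cas_ptr(&rwl->rw_owner, owner, 0) != owner)
 *				rw_vector_exit(rwl);
 *			return;
 *		}
 *		// Reader: drop one hold, unless waiters must be woken or
 *		// the hold count would underflow.
 *		for (;;) {
 *			if ((owner & RW_HAS_WAITERS) || owner < RW_READ_INCR) {
 *				rw_vector_exit(rwl);
 *				return;
 *			}
 *			next = cas_ptr(&rwl->rw_owner, owner,
 *			    owner - RW_READ_INCR);
 *			if (next == owner)
 *				return;
 *			owner = next;	// lost a race; re-check and retry
 *		}
 *	}
 */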

#endif	/* LOCKDEBUG */

/*
 * int _lock_cas(uintptr_t *val, uintptr_t old, uintptr_t new);
 *
 * Perform an atomic compare-and-set operation.
 */
NENTRY(_lock_cas)				/* 0x0140, 19 bytes */
	movq	%rsi, %rax
	LOCK
	cmpxchgq %rdx, (%rdi)
	movq	$0, %rax
	setz	%al				/* = 1 if success */
	ret
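
/*
 * The C semantics, as a hedged sketch: the load, compare and store below
 * all happen as a single atomic step via the locked cmpxchgq.
 *
 *	int
 *	_lock_cas(uintptr_t *val, uintptr_t old, uintptr_t new)
 *	{
 *		if (*val != old)
 *			return 0;	// lost the race, caller retries
 *		*val = new;
 *		return 1;		// swap succeeded
 *	}
 */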

/*
 * Memory barrier operations.
 */
NENTRY(mb_read)
	lfence
	ret
END(mb_read_end, 8)

NENTRY(mb_write)
	/* Nothing just yet */
	ret
END(mb_write_end, 8)

NENTRY(mb_memory)
	mfence
	ret
END(mb_memory_end, 8)
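
/*
 * Hedged C equivalents of the three stubs above, written with inline
 * assembly for illustration: mb_read() orders loads, mb_memory() orders
 * both loads and stores, and mb_write() is empty because x86 does not
 * reorder stores with other stores.
 *
 *	static inline void
 *	mb_read(void)
 *	{
 *		__asm volatile("lfence" ::: "memory");
 *	}
 *
 *	static inline void
 *	mb_write(void)
 *	{
 *		__asm volatile("" ::: "memory");	// compiler barrier only
 *	}
 *
 *	static inline void
 *	mb_memory(void)
 *	{
 *		__asm volatile("mfence" ::: "memory");
 *	}
 */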

/*
 * Make sure any code after the ret is encoded by gas as proper NOPs;
 * otherwise it could stall newer processors.
 */

NENTRY(x86_mb_nop)
	ret
END(x86_mb_nop_end, 8)