/*	$NetBSD: lock_stubs.S,v 1.35 2019/12/08 20:00:56 ad Exp $	*/

/*
 * Copyright (c) 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"

#include <machine/asm.h>
#include <machine/frameasm.h>

#include "assym.h"

#define LOCK	\
	HOTPATCH(HP_NAME_NOLOCK, 1)	; \
	lock
#define RET	\
	HOTPATCH(HP_NAME_RETFENCE, 3)	; \
	ret; nop; nop			; \
	ret
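
/*
 * LOCK and RET above are hotpatch sites: HP_NAME_NOLOCK lets a
 * uniprocessor kernel overwrite the one-byte lock prefix with a nop, and
 * HP_NAME_RETFENCE reserves a three-byte window so the plain return can be
 * replaced with a fenced return sequence where one is needed.  For the
 * stubs below, every "LOCK; cmpxchgq %reg, (%rdi)" is a full
 * compare-and-swap: cmpxchg compares %rax with the destination, stores the
 * source operand there and sets ZF on a match, otherwise loads the current
 * value into %rax and clears ZF.
 */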
#ifndef LOCKDEBUG

	.align	64

/*
 * void mutex_enter(kmutex_t *mtx);
 *
 * Acquire a mutex and post a load fence.
 */
ENTRY(mutex_enter)
	movq	CPUVAR(CURLWP), %rcx
	xorq	%rax, %rax
	LOCK
	cmpxchgq %rcx, (%rdi)
	jnz	1f
	RET
1:
	jmp	_C_LABEL(mutex_vector_enter)
END(mutex_enter)
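
/*
 * Roughly, in C (a sketch of the fast path only -- the contended case and
 * the authoritative owner-word layout live in kern_mutex.c; the field
 * spelling below is illustrative):
 *
 *	void mutex_enter(kmutex_t *mtx)
 *	{
 *		// swing the owner word from 0 (unowned) to curlwp
 *		if (atomic_cas_ulong((volatile u_long *)&mtx->mtx_owner, 0,
 *		    (u_long)curlwp) != 0)
 *			mutex_vector_enter(mtx);	// contended: slow path
 *	}
 */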

/*
 * void mutex_exit(kmutex_t *mtx);
 *
 * Release a mutex and post a store fence.
 *
 * See comments in mutex_vector_enter() about doing this operation unlocked
 * on multiprocessor systems, and comments in arch/x86/include/lock.h about
 * memory ordering on Intel x86 systems.
 */
ENTRY(mutex_exit)
	movq	CPUVAR(CURLWP), %rax
	xorq	%rdx, %rdx
	cmpxchgq %rdx, (%rdi)
	jnz	1f
	ret
1:
	jmp	_C_LABEL(mutex_vector_exit)
END(mutex_exit)
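
/*
 * Roughly, in C (sketch only; the cmpxchg above is deliberately unlocked,
 * which is written here as a plain CAS for readability):
 *
 *	void mutex_exit(kmutex_t *mtx)
 *	{
 *		// release only if we still own it and there are no waiters
 *		if (atomic_cas_ulong((volatile u_long *)&mtx->mtx_owner,
 *		    (u_long)curlwp, 0) != (u_long)curlwp)
 *			mutex_vector_exit(mtx);		// slow path
 *	}
 */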

/*
 * void mutex_spin_enter(kmutex_t *mtx);
 *
 * Acquire a spin mutex and post a load fence.
 */
ENTRY(mutex_spin_enter)
	movl	$1, %eax
	movl	CPUVAR(ILEVEL), %esi
	movzbl	MTX_IPL(%rdi), %ecx		/* new SPL */
	cmpl	%ecx, %esi			/* higher? */
	cmovgl	%esi, %ecx
	movl	%ecx, CPUVAR(ILEVEL)		/* splraiseipl() */
	subl	%eax, CPUVAR(MTX_COUNT)		/* decl doesn't set CF */
	cmovncl	CPUVAR(MTX_OLDSPL), %esi
	movl	%esi, CPUVAR(MTX_OLDSPL)
	xchgb	%al, MTX_LOCK(%rdi)		/* lock */
#ifdef MULTIPROCESSOR	/* XXX for xen */
	testb	%al, %al
	jnz	1f
#endif
	RET
1:
	jmp	_C_LABEL(mutex_spin_retry)	/* failed; hard case */
END(mutex_spin_enter)
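
/*
 * Roughly, in C (sketch only; the cpu_info and kmutex field spellings are
 * illustrative, the count/oldspl bookkeeping follows the code above):
 *
 *	void mutex_spin_enter(kmutex_t *mtx)
 *	{
 *		struct cpu_info *ci = curcpu();
 *		int s = splraiseipl(mtx->mtx_ipl);	// raise SPL first
 *
 *		// remember the SPL to restore when the outermost spin
 *		// mutex held by this CPU is finally released
 *		if (ci->ci_mtx_count-- == 0)
 *			ci->ci_mtx_oldspl = s;
 *		if (!__cpu_simple_lock_try(&mtx->mtx_lock))
 *			mutex_spin_retry(mtx);		// spin in the slow path
 *	}
 */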

/*
 * void mutex_spin_exit(kmutex_t *mtx);
 *
 * Release a spin mutex and post a store fence.
 */
ENTRY(mutex_spin_exit)
#ifdef DIAGNOSTIC

	movl	$0x0001, %eax			/* new + expected value */
	movq	CPUVAR(SELF), %r8
	cmpxchgb %ah, MTX_LOCK(%rdi)		/* unlock */
	jnz	_C_LABEL(mutex_vector_exit)	/* hard case if problems */
	movl	CPU_INFO_MTX_OLDSPL(%r8), %edi
	incl	CPU_INFO_MTX_COUNT(%r8)
	jnz	1f
	cmpl	CPU_INFO_ILEVEL(%r8), %edi
	jae	1f
#if !defined(XENPV)
	movl	CPU_INFO_IUNMASK(%r8,%rdi,4), %esi
	CLI(ax)
	testl	CPU_INFO_IPENDING(%r8), %esi
	jnz	_C_LABEL(Xspllower)
#endif
#if defined(XEN)
	movl	CPU_INFO_XUNMASK(%r8,%rdi,4), %esi
	CLI(ax)
	testl	CPU_INFO_XPENDING(%r8), %esi
	jnz	_C_LABEL(Xspllower)
#endif
	movl	%edi, CPU_INFO_ILEVEL(%r8)
	STI(ax)
1:	rep					/* double byte ret as branch */
	ret					/* target: see AMD docs */

#else	/* DIAGNOSTIC */

	movq	CPUVAR(SELF), %rsi
	movb	$0x00, MTX_LOCK(%rdi)
	movl	CPU_INFO_MTX_OLDSPL(%rsi), %ecx
	incl	CPU_INFO_MTX_COUNT(%rsi)
	movl	CPU_INFO_ILEVEL(%rsi),%edx
	cmovnzl	%edx,%ecx
	pushq	%rbx
	cmpl	%edx,%ecx			/* new level is lower? */
	jae	2f
1:
#if !defined(XENPV)
	movl	CPU_INFO_IPENDING(%rsi),%eax
	testl	%eax,CPU_INFO_IUNMASK(%rsi,%rcx,4)/* deferred interrupts? */
	jnz	3f
	movl	%eax,%ebx
	cmpxchg8b CPU_INFO_ISTATE(%rsi)		/* swap in new ilevel */
	jnz	4f
#endif
#if defined(XEN)
	movl	CPU_INFO_XPENDING(%rsi),%eax
	testl	%eax,CPU_INFO_XUNMASK(%rsi,%rcx,4)/* deferred interrupts? */
	jnz	3f
	movl	%edx, %eax
	cmpxchgl %ecx, CPU_INFO_ILEVEL(%rsi)
	jnz	4f
#endif
2:
	popq	%rbx
	ret
3:
	popq	%rbx
	movl	%ecx, %edi
	jmp	_C_LABEL(Xspllower)
4:
	jmp	1b

#endif	/* DIAGNOSTIC */

END(mutex_spin_exit)
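
/*
 * Roughly, in C (sketch of the !DIAGNOSTIC path; the DIAGNOSTIC variant
 * additionally verifies that the lock byte really was held before
 * clearing it, and punts to mutex_vector_exit() if not):
 *
 *	void mutex_spin_exit(kmutex_t *mtx)
 *	{
 *		struct cpu_info *ci = curcpu();
 *		int s = ci->ci_mtx_oldspl;
 *
 *		__cpu_simple_unlock(&mtx->mtx_lock);
 *		if (++ci->ci_mtx_count == 0)
 *			spllower(s);	// outermost lock: restore saved SPL
 *	}
 *
 * The assembly open-codes the spllower(): it only drops the IPL in line
 * when no masked interrupts are pending, and otherwise branches to
 * Xspllower to process them.
 */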

/*
 * void	rw_enter(krwlock_t *rwl, krw_t op);
 *
 * Acquire one hold on a RW lock.
 */
ENTRY(rw_enter)
	cmpl	$RW_READER, %esi
	jne	2f

	/*
	 * Reader: this is the most common case.
	 */
	movq	(%rdi), %rax
0:
	testb	$(RW_WRITE_LOCKED|RW_WRITE_WANTED), %al
	jnz	3f
	leaq	RW_READ_INCR(%rax), %rdx
	LOCK
	cmpxchgq %rdx, (%rdi)
	jnz	1f
	RET
1:
	jmp	0b

	/*
	 * Writer: if the compare-and-set fails, don't bother retrying.
	 */
2:	movq	CPUVAR(CURLWP), %rcx
	xorq	%rax, %rax
	orq	$RW_WRITE_LOCKED, %rcx
	LOCK
	cmpxchgq %rcx, (%rdi)
	jnz	3f
	RET
3:
	jmp	_C_LABEL(rw_vector_enter)
END(rw_enter)
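
/*
 * Roughly, in C (sketch only; RW_READ_INCR, RW_WRITE_LOCKED and
 * RW_WRITE_WANTED are the constants used above, the owner-field spelling
 * is illustrative):
 *
 *	void rw_enter(krwlock_t *rwl, krw_t op)
 *	{
 *		uintptr_t owner = rwl->rw_owner, next;
 *
 *		if (op == RW_READER) {
 *			// retry while no writer holds or wants the lock
 *			while ((owner & (RW_WRITE_LOCKED|RW_WRITE_WANTED)) == 0) {
 *				next = atomic_cas_ulong(&rwl->rw_owner,
 *				    owner, owner + RW_READ_INCR);
 *				if (next == owner)
 *					return;
 *				owner = next;
 *			}
 *		} else if (atomic_cas_ulong(&rwl->rw_owner, 0,
 *		    (uintptr_t)curlwp | RW_WRITE_LOCKED) == 0) {
 *			return;
 *		}
 *		rw_vector_enter(rwl, op);	// slow path
 *	}
 */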

/*
 * void	rw_exit(krwlock_t *rwl);
 *
 * Release one hold on a RW lock.
 */
ENTRY(rw_exit)
	movq	(%rdi), %rax
	testb	$RW_WRITE_LOCKED, %al
	jnz	2f

	/*
	 * Reader
	 */
0:	testb	$RW_HAS_WAITERS, %al
	jnz	3f
	cmpq	$RW_READ_INCR, %rax
	jb	3f
	leaq	-RW_READ_INCR(%rax), %rdx
	LOCK
	cmpxchgq %rdx, (%rdi)
	jnz	1f
	ret
1:
	jmp	0b

	/*
	 * Writer
	 */
2:	leaq	-RW_WRITE_LOCKED(%rax), %rdx
	subq	CPUVAR(CURLWP), %rdx
	jnz	3f
	LOCK
	cmpxchgq %rdx, (%rdi)
	jnz	3f
	ret

3:	jmp	_C_LABEL(rw_vector_exit)
END(rw_exit)
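
/*
 * Roughly, in C (sketch only; waking waiters is rw_vector_exit()'s job):
 *
 *	void rw_exit(krwlock_t *rwl)
 *	{
 *		uintptr_t owner = rwl->rw_owner, next;
 *
 *		if (owner & RW_WRITE_LOCKED) {
 *			// writer: drop it only if we own it outright
 *			if (owner == ((uintptr_t)curlwp | RW_WRITE_LOCKED) &&
 *			    atomic_cas_ulong(&rwl->rw_owner, owner, 0) == owner)
 *				return;
 *		} else {
 *			// reader: drop one hold unless waiters need waking
 *			while ((owner & RW_HAS_WAITERS) == 0 &&
 *			    owner >= RW_READ_INCR) {
 *				next = atomic_cas_ulong(&rwl->rw_owner,
 *				    owner, owner - RW_READ_INCR);
 *				if (next == owner)
 *					return;
 *				owner = next;
 *			}
 *		}
 *		rw_vector_exit(rwl);		// slow path
 *	}
 */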

/*
 * int	rw_tryenter(krwlock_t *rwl, krw_t op);
 *
 * Try to acquire one hold on a RW lock.
 */
ENTRY(rw_tryenter)
	cmpl	$RW_READER, %esi
	jne	2f

	/*
	 * Reader: this is the most common case.
	 */
	movq	(%rdi), %rax
0:
	testb	$(RW_WRITE_LOCKED|RW_WRITE_WANTED), %al
	jnz	4f
	leaq	RW_READ_INCR(%rax), %rdx
	LOCK
	cmpxchgq %rdx, (%rdi)
	jnz	1f
	movl	%edx, %eax			/* nonzero */
	RET
1:
	jmp	0b

	/*
	 * Writer: if the compare-and-set fails, don't bother retrying.
	 */
2:	movq	CPUVAR(CURLWP), %rcx
	xorq	%rax, %rax
	orq	$RW_WRITE_LOCKED, %rcx
	LOCK
	cmpxchgq %rcx, (%rdi)
	movl	$0, %eax
	setz	%al
3:
	RET
4:
	xorl	%eax, %eax
	jmp	3b
END(rw_tryenter)
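
/*
 * Roughly, in C (sketch only):
 *
 *	int rw_tryenter(krwlock_t *rwl, krw_t op)
 *	{
 *		uintptr_t owner = rwl->rw_owner, next;
 *
 *		if (op == RW_READER) {
 *			while ((owner & (RW_WRITE_LOCKED|RW_WRITE_WANTED)) == 0) {
 *				next = atomic_cas_ulong(&rwl->rw_owner,
 *				    owner, owner + RW_READ_INCR);
 *				if (next == owner)
 *					return 1;
 *				owner = next;	// lost a race; try again
 *			}
 *			return 0;
 *		}
 *		// writer: a single attempt, no retry
 *		return atomic_cas_ulong(&rwl->rw_owner, 0,
 *		    (uintptr_t)curlwp | RW_WRITE_LOCKED) == 0;
 *	}
 */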

#endif	/* LOCKDEBUG */

/*
 * Spinlocks.
 */
ENTRY(__cpu_simple_lock_init)
	movb	$0, (%rdi)
	ret
END(__cpu_simple_lock_init)

ENTRY(__cpu_simple_lock)
	movl	$0x0100, %eax
1:
	LOCK
	cmpxchgb %ah, (%rdi)
	jnz	2f
	RET
2:
	movl	$0x0100, %eax
	pause
	nop
	nop
	cmpb	$0, (%rdi)
	je	1b
	jmp	2b
END(__cpu_simple_lock)
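
/*
 * The lock byte is 0 when free and 1 when held: $0x0100 puts the expected
 * value 0x00 in %al and the new value 0x01 in %ah for cmpxchgb.  Roughly,
 * in C (sketch only, written in terms of the stubs in this file):
 *
 *	void __cpu_simple_lock(__cpu_simple_lock_t *lockp)
 *	{
 *		while (!__cpu_simple_lock_try(lockp)) {
 *			// spin politely until the byte looks free, then
 *			// go back and retry the locked compare-and-swap
 *			do {
 *				x86_pause();
 *			} while (*lockp != __SIMPLELOCK_UNLOCKED);
 *		}
 *	}
 */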

ENTRY(__cpu_simple_unlock)
	movb	$0, (%rdi)
	ret
END(__cpu_simple_unlock)

ENTRY(__cpu_simple_lock_try)
	movl	$0x0100, %eax
	LOCK
	cmpxchgb %ah, (%rdi)
	movl	$0, %eax
	setz	%al
	KMSAN_INIT_RET(4)
	RET
END(__cpu_simple_lock_try)