/*	$NetBSD: lock_stubs.S,v 1.37 2022/09/07 00:40:18 knakahara Exp $	*/

/*
 * Copyright (c) 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"

#include <machine/asm.h>
#include <machine/frameasm.h>

#include "assym.h"

#define LOCK	\
	HOTPATCH(HP_NAME_NOLOCK, 1)	; \
	lock
#define RET	\
	HOTPATCH(HP_NAME_RETFENCE, 3)	; \
	ret; nop; nop			; \
	ret
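
/*
 * Neither macro is a fixed instruction sequence: both emit hotpatch
 * records consumed by the x86 patch code at boot (details assumed
 * here, not verified against x86/patch.c).  On uniprocessor kernels
 * the one-byte lock prefix emitted by LOCK can be overwritten with a
 * nop, and on CPUs that want a fence before returning from a lock
 * stub the three patchable bytes of RET ("ret; nop; nop") can be
 * replaced by a three-byte fence such as lfence, after which
 * execution falls through into the final "ret".  Every use of RET
 * must therefore keep all four bytes intact.
 */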

#ifndef LOCKDEBUG

	.align	64

/*
 * void mutex_enter(kmutex_t *mtx);
 *
 * Acquire a mutex and post a load fence.
 */
ENTRY(mutex_enter)
	movq	CPUVAR(CURLWP), %rcx
	xorq	%rax, %rax
	LOCK
	cmpxchgq %rcx, (%rdi)
	jnz	1f
	RET
1:
	jmp	_C_LABEL(mutex_vector_enter)
END(mutex_enter)
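
/*
 * In rough C terms the fast path is (a sketch only; mtx_owner and
 * atomic_cas_ptr() are the usual NetBSD names, assumed rather than
 * taken from this file):
 *
 *	if (atomic_cas_ptr(&mtx->mtx_owner, NULL, curlwp) != NULL)
 *		mutex_vector_enter(mtx);
 *
 * A free adaptive mutex holds zero, so ownership is taken by
 * installing the current LWP pointer with a locked compare-and-swap;
 * any contention (nonzero owner word) is punted to the C slow path.
 */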

/*
 * void mutex_exit(kmutex_t *mtx);
 *
 * Release a mutex and post a store fence.
 *
 * See comments in mutex_vector_enter() about doing this operation unlocked
 * on multiprocessor systems, and comments in arch/x86/include/lock.h about
 * memory ordering on Intel x86 systems.
 */
ENTRY(mutex_exit)
	movq	CPUVAR(CURLWP), %rax
	xorq	%rdx, %rdx
	cmpxchgq %rdx, (%rdi)
	jnz	1f
	ret
1:
	jmp	_C_LABEL(mutex_vector_exit)
END(mutex_exit)
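
/*
 * Note that, unlike mutex_enter(), the cmpxchg above carries no lock
 * prefix.  As the comments referenced above discuss, the release can
 * be done without the bus lock: the owner word is simply swapped from
 * curlwp back to zero, and if it no longer holds a bare curlwp (for
 * example, waiter bits have been set), the compare fails and
 * mutex_vector_exit() handles the slow case.
 */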

/*
 * void mutex_spin_enter(kmutex_t *mtx);
 *
 * Acquire a spin mutex and post a load fence.
 */
ENTRY(mutex_spin_enter)
	movl	$1, %eax
	movzbl	CPUVAR(ILEVEL), %esi
	movzbl	MTX_IPL(%rdi), %ecx		/* new SPL */
	cmpl	%ecx, %esi			/* higher? */
	cmovgl	%esi, %ecx
	movb	%cl, CPUVAR(ILEVEL)		/* splraiseipl() */
	subl	%eax, CPUVAR(MTX_COUNT)		/* decl doesn't set CF */
	cmovncl	CPUVAR(MTX_OLDSPL), %esi
	movl	%esi, CPUVAR(MTX_OLDSPL)
	xchgb	%al, MTX_LOCK(%rdi)		/* lock */
#ifdef MULTIPROCESSOR	/* XXX for xen */
	testb	%al, %al
	jnz	1f
#endif
	RET
1:
	jmp	_C_LABEL(mutex_spin_retry)	/* failed; hard case */
END(mutex_spin_enter)
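
/*
 * The SPL/count bookkeeping above is roughly the following C (a
 * sketch; the ci_* field names mirror the MTX_* assym offsets but are
 * assumed):
 *
 *	s = splraiseipl(mtx->mtx_ipl);
 *	if (--ci->ci_mtx_count == -1)
 *		ci->ci_mtx_oldspl = s;
 *
 * The per-CPU count of held spin mutexes runs downward from zero, so
 * the saved SPL is recorded only for the outermost acquisition: the
 * subl borrows (sets CF) exactly when the count goes from 0 to -1,
 * and otherwise the cmovnc replaces %esi with the already-saved value
 * so the unconditional store is a no-op.
 */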

/*
 * void mutex_spin_exit(kmutex_t *mtx);
 *
 * Release a spin mutex and post a store fence.
 */
ENTRY(mutex_spin_exit)
#ifdef DIAGNOSTIC

	movl	$0x0001, %eax			/* new + expected value */
	movq	CPUVAR(SELF), %r8
	cmpxchgb %ah, MTX_LOCK(%rdi)		/* unlock */
	jnz	_C_LABEL(mutex_vector_exit)	/* hard case if problems */
	movl	CPU_INFO_MTX_OLDSPL(%r8), %edi
	incl	CPU_INFO_MTX_COUNT(%r8)
	jnz	1f
	cmpl	CPU_INFO_ILEVEL(%r8), %edi
	jae	1f
	movl	CPU_INFO_IUNMASK(%r8,%rdi,4), %esi
	CLI(ax)
	testl	CPU_INFO_IPENDING(%r8), %esi
	jnz	_C_LABEL(Xspllower)
	movl	%edi, CPU_INFO_ILEVEL(%r8)
	STI(ax)
1:	rep					/* double byte ret as branch */
	ret					/* target: see AMD docs */

#else	/* DIAGNOSTIC */

	movq	CPUVAR(SELF), %rsi
	movb	$0x00, MTX_LOCK(%rdi)
	movl	CPU_INFO_MTX_OLDSPL(%rsi), %ecx
	incl	CPU_INFO_MTX_COUNT(%rsi)
	movl	CPU_INFO_ILEVEL(%rsi),%edx
	cmovnzl	%edx,%ecx
	pushq	%rbx
	cmpl	%edx,%ecx			/* new level is lower? */
	jae	2f
1:
	movl	CPU_INFO_IPENDING(%rsi),%eax
	testl	%eax,CPU_INFO_IUNMASK(%rsi,%rcx,4)/* deferred interrupts? */
	jnz	3f
	movl	%eax,%ebx
	cmpxchg8b CPU_INFO_ISTATE(%rsi)		/* swap in new ilevel */
	jnz	4f
2:
	popq	%rbx
	ret
3:
	popq	%rbx
	movl	%ecx, %edi
	jmp	_C_LABEL(Xspllower)
4:
	jmp	1b

#endif	/* DIAGNOSTIC */

END(mutex_spin_exit)
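
/*
 * The release mirrors mutex_spin_enter(), roughly (again a sketch
 * with assumed field names):
 *
 *	mtx->mtx_lock = 0;
 *	if (++ci->ci_mtx_count == 0)
 *		splx(ci->ci_mtx_oldspl);
 *
 * Lowering the SPL is the delicate part.  The non-DIAGNOSTIC path
 * uses cmpxchg8b to swap the new level into the combined
 * ilevel/ipending word only if no interrupt became pending in the
 * meantime, and branches to Xspllower to process anything that was
 * deferred while the mutex was held.
 */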

/*
 * void	rw_enter(krwlock_t *rwl, krw_t op);
 *
 * Acquire one hold on a RW lock.
 */
ENTRY(rw_enter)
	cmpl	$RW_READER, %esi
	jne	2f

	/*
	 * Reader: this is the most common case.
	 */
	movq	(%rdi), %rax
0:
	testb	$(RW_WRITE_LOCKED|RW_WRITE_WANTED), %al
	jnz	3f
	leaq	RW_READ_INCR(%rax), %rdx
	LOCK
	cmpxchgq %rdx, (%rdi)
	jnz	1f
	RET
1:
	jmp	0b

	/*
	 * Writer: if the compare-and-set fails, don't bother retrying.
	 */
2:	movq	CPUVAR(CURLWP), %rcx
	xorq	%rax, %rax
	orq	$RW_WRITE_LOCKED, %rcx
	LOCK
	cmpxchgq %rcx, (%rdi)
	jnz	3f
	RET
3:
	jmp	_C_LABEL(rw_vector_enter)
END(rw_enter)
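
/*
 * The entire krwlock_t state lives in one word: write-locked holds
 * the owner LWP pointer with RW_WRITE_LOCKED or'd into the low bits,
 * read-locked holds the reader count scaled by RW_READ_INCR with the
 * flag bits below it.  A reader thus takes a hold by CAS-adding
 * RW_READ_INCR, retrying only when another CPU races the update; any
 * write-locked or write-wanted state goes straight to the slow path.
 * In rough C (sketch; atomic_cas_ulong() assumed):
 *
 *	do {
 *		owner = rwl->rw_owner;
 *		if (owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED))
 *			return rw_vector_enter(rwl, RW_READER);
 *	} while (atomic_cas_ulong(&rwl->rw_owner, owner,
 *	    owner + RW_READ_INCR) != owner);
 */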

/*
 * void	rw_exit(krwlock_t *rwl);
 *
 * Release one hold on a RW lock.
 */
ENTRY(rw_exit)
	movq	(%rdi), %rax
	testb	$RW_WRITE_LOCKED, %al
	jnz	2f

	/*
	 * Reader
	 */
0:	testb	$RW_HAS_WAITERS, %al
	jnz	3f
	cmpq	$RW_READ_INCR, %rax
	jb	3f
	leaq	-RW_READ_INCR(%rax), %rdx
	LOCK
	cmpxchgq %rdx, (%rdi)
	jnz	1f
	ret
1:
	jmp	0b

	/*
	 * Writer
	 */
2:	leaq	-RW_WRITE_LOCKED(%rax), %rdx
	subq	CPUVAR(CURLWP), %rdx
	jnz	3f
	LOCK
	cmpxchgq %rdx, (%rdi)
	jnz	3f
	ret

3:	jmp	_C_LABEL(rw_vector_exit)
END(rw_exit)
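
/*
 * The writer path uses a compact ownership test: subtracting
 * RW_WRITE_LOCKED and then curlwp from the lock word leaves %rdx
 * equal to zero exactly when the caller owns the lock with no other
 * bits set, and zero also happens to be the fully-released value, so
 * the same register feeds the cmpxchg as the new value.  Roughly, as
 * a sketch:
 *
 *	new = rwl->rw_owner - RW_WRITE_LOCKED - (uintptr_t)curlwp;
 *	if (new != 0 || !CAS(&rwl->rw_owner, old, new))
 *		rw_vector_exit(rwl);
 *
 * where CAS() stands in for the locked cmpxchg, and the slow path
 * covers both waiters and a lock the caller does not own.
 */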

/*
 * int	rw_tryenter(krwlock_t *rwl, krw_t op);
 *
 * Try to acquire one hold on a RW lock.
 */
ENTRY(rw_tryenter)
	cmpl	$RW_READER, %esi
	jne	2f

	/*
	 * Reader: this is the most common case.
	 */
	movq	(%rdi), %rax
0:
	testb	$(RW_WRITE_LOCKED|RW_WRITE_WANTED), %al
	jnz	4f
	leaq	RW_READ_INCR(%rax), %rdx
	LOCK
	cmpxchgq %rdx, (%rdi)
	jnz	1f
	movl	%edx, %eax			/* nonzero */
	RET
1:
	jmp	0b

	/*
	 * Writer: if the compare-and-set fails, don't bother retrying.
	 */
2:	movq	CPUVAR(CURLWP), %rcx
	xorq	%rax, %rax
	orq	$RW_WRITE_LOCKED, %rcx
	LOCK
	cmpxchgq %rcx, (%rdi)
	movl	$0, %eax
	setz	%al
3:
	RET
4:
	xorl	%eax, %eax
	jmp	3b
END(rw_tryenter)
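
/*
 * Unlike rw_enter(), this never blocks or spins on contention: the
 * reader path retries only while its own CAS loses a race, and any
 * write-locked or write-wanted state returns failure immediately.
 * The int result is simply "nonzero on success": the reader path
 * reuses the new lock word (always nonzero), while the writer path
 * materializes 0 or 1 from the cmpxchg's ZF via setz.
 */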

#endif	/* LOCKDEBUG */

/*
 * Spinlocks.
 */
ENTRY(__cpu_simple_lock_init)
	movb	$0, (%rdi)
	ret
END(__cpu_simple_lock_init)

ENTRY(__cpu_simple_lock)
	movl	$0x0100, %eax
1:
	LOCK
	cmpxchgb %ah, (%rdi)
	jnz	2f
	RET
2:
	movl	$0x0100, %eax
	pause
	nop
	nop
	cmpb	$0, (%rdi)
	je	1b
	jmp	2b
END(__cpu_simple_lock)
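
/*
 * This is the classic test-and-test-and-set spin.  The locked
 * cmpxchgb tries to move the lock byte from 0 to 1 (%ah holds the
 * 0x01 from the 0x0100 constant, %al the expected 0); on failure the
 * loop waits with "pause" on plain unlocked reads, retrying the
 * expensive locked operation only once the byte reads as 0 again.
 * That keeps the cache line shared while waiting instead of bouncing
 * it between CPUs, and pause hints to the pipeline (and any SMT
 * sibling) that this is a spin-wait loop.
 */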

ENTRY(__cpu_simple_unlock)
	movb	$0, (%rdi)
	ret
END(__cpu_simple_unlock)

ENTRY(__cpu_simple_lock_try)
	movl	$0x0100, %eax
	LOCK
	cmpxchgb %ah, (%rdi)
	movl	$0, %eax
	setz	%al
	KMSAN_INIT_RET(4)
	RET
END(__cpu_simple_lock_try)