/*	$NetBSD: lock.h,v 1.17 2007/09/26 20:59:59 ad Exp $	*/

/*-
 * Copyright (c) 2000, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */

#ifndef _X86_LOCK_H_
#define	_X86_LOCK_H_

#if defined(_KERNEL_OPT)
#include "opt_lockdebug.h"
#endif

#ifdef _KERNEL
#include <machine/cpufunc.h>
#endif

/*
 * Predicates and raw set/clear operations on the lock word.  These are
 * plain, non-atomic loads and stores and provide no memory ordering of
 * their own.
 */
static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{

	*__ptr = __SIMPLELOCK_LOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{

	*__ptr = __SIMPLELOCK_UNLOCKED;
}

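/*
 * Illustrative use only: the predicates above are suited to assertions
 * about lock state, for example (the lock variable here is hypothetical
 * and not defined in this file):
 *
 *	KASSERT(__SIMPLELOCK_LOCKED_P(&sc->sc_slock));
 */
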
#ifdef LOCKDEBUG

/*
 * With LOCKDEBUG the spin lock primitives are provided out of line,
 * rather than as the inline versions below.
 */
extern void __cpu_simple_lock_init(__cpu_simple_lock_t *);
extern void __cpu_simple_lock(__cpu_simple_lock_t *);
extern int __cpu_simple_lock_try(__cpu_simple_lock_t *);
extern void __cpu_simple_unlock(__cpu_simple_lock_t *);

#else

#include <machine/atomic.h>

static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *)
	__attribute__((__unused__));

static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *lockp)
{

	*lockp = __SIMPLELOCK_UNLOCKED;
	__insn_barrier();
}

static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *lockp)
{

	/*
	 * Test-and-test-and-set: try to take the lock with an atomic
	 * byte swap; while it is held by another CPU, spin on plain
	 * reads (with a pause hint in the kernel to reduce bus traffic
	 * while waiting) until it looks free, then retry the swap.
	 */
	while (x86_atomic_testset_b(lockp, __SIMPLELOCK_LOCKED)
	    != __SIMPLELOCK_UNLOCKED) {
		do {
#ifdef _KERNEL
			x86_pause();
#endif /* _KERNEL */
		} while (*lockp == __SIMPLELOCK_LOCKED);
	}
	__insn_barrier();
}

static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *lockp)
{
	int r = (x86_atomic_testset_b(lockp, __SIMPLELOCK_LOCKED)
	    == __SIMPLELOCK_UNLOCKED);

	__insn_barrier();

	return (r);
}

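/*
 * A minimal usage sketch for the try operation (illustrative only; the
 * lock variable and the fallback path are hypothetical and not part of
 * this header):
 *
 *	if (!__cpu_simple_lock_try(&sc->sc_slock))
 *		__cpu_simple_lock(&sc->sc_slock);
 *	(critical section)
 *	__cpu_simple_unlock(&sc->sc_slock);
 */
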
/*
 * Note on x86 memory ordering
 *
 * When releasing a lock we must ensure that no stores or loads from within
 * the critical section are re-ordered by the CPU to occur outside of it:
 * they must have completed and be visible to other processors once the lock
 * has been released.
 *
 * NetBSD usually runs with the kernel mapped (via MTRR) in a WB (write
 * back) memory region.  In that case, memory ordering on x86 platforms
 * looks like this:
 *
 * i386		All loads/stores occur in instruction sequence.
 *
 * i486		All loads/stores occur in instruction sequence.  In
 * Pentium	exceptional circumstances, loads can be re-ordered around
 *		stores, but for the purposes of releasing a lock it does
 *		not matter.  Stores may not be immediately visible to other
 *		processors as they can be buffered.  However, since the
 *		stores are buffered in order the lock release will always be
 *		the last operation in the critical section that becomes
 *		visible to other CPUs.
 *
 * Pentium Pro	The "Intel 64 and IA-32 Architectures Software Developer's
 * onwards	Manual" volume 3A (order number 253668-022US) says that (1)
 *		"Reads can be carried out speculatively and in any order"
 *		and (2) "Reads can pass buffered stores, but the processor
 *		is self-consistent.".  This would be a problem for the code
 *		below, and would mandate a locked instruction cycle or load
 *		fence before releasing the simple lock.
 *
 *		The "Intel Pentium 4 Processor Optimization" guide (order
 *		number 248966) says: "Loads can be moved before stores
 *		that occurred earlier in the program if they are not
 *		predicted to load from the same linear address.".  This is
 *		not a problem since the only loads that can be re-ordered
 *		take place once the lock has been released via a store.
 *
 *		The above two documents seem to contradict each other,
 *		however with the exception of early steppings of the Pentium
 *		Pro, the second document is closer to the truth: a store
 *		will always act as a load fence for all loads that precede
 *		the store in instruction order.
 *
 *		Again, note that stores can be buffered and will not always
 *		become immediately visible to other CPUs: they are however
 *		buffered in order.
 *
 * AMD64	Stores occur in order and are buffered.  Loads can be
 *		reordered, however stores act as load fences, meaning that
 *		loads cannot be reordered around stores.
 */
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *lockp)
{

	__insn_barrier();
	*lockp = __SIMPLELOCK_UNLOCKED;
}

#endif /* !LOCKDEBUG */
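
/*
 * Putting the pieces together, a minimal usage sketch (assuming a
 * hypothetical shared counter; none of these names are defined in this
 * file):
 *
 *	static __cpu_simple_lock_t counter_lock;
 *	static int counter;
 *
 *	__cpu_simple_lock_init(&counter_lock);
 *	...
 *	__cpu_simple_lock(&counter_lock);
 *	counter++;
 *	__cpu_simple_unlock(&counter_lock);
 *
 * Per the ordering notes above, the store to 'counter' is buffered in
 * order ahead of the releasing store, so it becomes visible to other
 * CPUs no later than the unlock itself.
 */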

#define	SPINLOCK_SPIN_HOOK	/* nothing */
#define	SPINLOCK_BACKOFF_HOOK	x86_pause()

#ifdef _KERNEL
/*
 * Memory barrier operations (see the ordering notes above).
 */
void	mb_read(void);
void	mb_write(void);
void	mb_memory(void);
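
/*
 * Illustrative sketch of how the barriers above might be used, assuming
 * the split suggested by their names (mb_write orders earlier stores
 * before later stores, mb_read orders earlier loads before later loads,
 * mb_memory orders both); 'datum' and 'ready' are hypothetical:
 *
 *	producer:
 *		datum = value;
 *		mb_write();		make datum visible first
 *		ready = 1;		then publish the flag
 *
 *	consumer:
 *		while (ready == 0)
 *			continue;
 *		mb_read();		order the flag read before
 *		use(datum);		the read of datum
 */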
#endif	/* _KERNEL */

#endif /* _X86_LOCK_H_ */