/* lock.h revision 1.27.2.1 */
/*	$NetBSD: lock.h,v 1.27.2.1 2007/02/27 16:53:08 yamt Exp $ */

/*-
 * Copyright (c) 1998, 1999, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

     39       1.1       pk #ifndef _MACHINE_LOCK_H
     40       1.1       pk #define _MACHINE_LOCK_H
     41       1.1       pk 
     42       1.1       pk /*
     43       1.1       pk  * Machine dependent spin lock operations.
     44       1.1       pk  */
     45       1.5  thorpej 
     46      1.15       pk #if __SIMPLELOCK_UNLOCKED != 0
     47      1.15       pk #error __SIMPLELOCK_UNLOCKED must be 0 for this implementation
     48      1.15       pk #endif
     49      1.15       pk 
     50       1.7  thorpej /* XXX So we can expose this to userland. */
     51      1.10  hannken #ifdef __lint__
     52      1.10  hannken #define __ldstub(__addr)	(__addr)
     53      1.10  hannken #else /* !__lint__ */
     54      1.22    perry static __inline int __ldstub(__cpu_simple_lock_t *addr);
     55      1.22    perry static __inline int __ldstub(__cpu_simple_lock_t *addr)
     56      1.14      mrg {
     57      1.14      mrg 	int v;
     58      1.14      mrg 
     59      1.20    perry 	__asm volatile("ldstub [%1],%0"
     60      1.18      chs 	    : "=&r" (v)
     61      1.14      mrg 	    : "r" (addr)
     62      1.14      mrg 	    : "memory");
     63      1.14      mrg 
     64      1.14      mrg 	return v;
     65      1.14      mrg }
     66      1.10  hannken #endif /* __lint__ */
     67       1.7  thorpej 
     68      1.23      uwe static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *)
     69       1.7  thorpej 	__attribute__((__unused__));
     70      1.23      uwe static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *)
     71       1.7  thorpej 	__attribute__((__unused__));
     72      1.23      uwe static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *)
     73       1.7  thorpej 	__attribute__((__unused__));
     74      1.17       pk #ifndef __CPU_SIMPLE_LOCK_NOINLINE
     75      1.23      uwe static __inline void __cpu_simple_lock(__cpu_simple_lock_t *)
     76      1.15       pk 	__attribute__((__unused__));
     77      1.15       pk #else
     78      1.23      uwe extern void __cpu_simple_lock(__cpu_simple_lock_t *);
     79      1.12       pk #endif
     80      1.12       pk 
     81      1.22    perry static __inline void
     82       1.9  thorpej __cpu_simple_lock_init(__cpu_simple_lock_t *alp)
     83       1.1       pk {
     84       1.2       pk 
     85       1.7  thorpej 	*alp = __SIMPLELOCK_UNLOCKED;
     86       1.1       pk }
     87       1.1       pk 
     88      1.17       pk #ifndef __CPU_SIMPLE_LOCK_NOINLINE
     89      1.22    perry static __inline void
     90       1.9  thorpej __cpu_simple_lock(__cpu_simple_lock_t *alp)
     91       1.1       pk {
     92       1.2       pk 
     93       1.2       pk 	/*
     94       1.7  thorpej 	 * If someone else holds the lock use simple reads until it
     95       1.7  thorpej 	 * is released, then retry the atomic operation. This reduces
     96       1.7  thorpej 	 * memory bus contention because the cache-coherency logic
     97       1.7  thorpej 	 * does not have to broadcast invalidates on the lock while
     98       1.7  thorpej 	 * we spin on it.
     99       1.2       pk 	 */
    100       1.7  thorpej 	while (__ldstub(alp) != __SIMPLELOCK_UNLOCKED) {
    101       1.7  thorpej 		while (*alp != __SIMPLELOCK_UNLOCKED)
    102       1.7  thorpej 			/* spin */ ;
    103       1.2       pk 	}
    104       1.1       pk }
    105      1.17       pk #endif /* __CPU_SIMPLE_LOCK_NOINLINE */
    106       1.1       pk 
    107      1.22    perry static __inline int
    108       1.9  thorpej __cpu_simple_lock_try(__cpu_simple_lock_t *alp)
    109       1.1       pk {
    110       1.2       pk 
    111       1.7  thorpej 	return (__ldstub(alp) == __SIMPLELOCK_UNLOCKED);
    112       1.1       pk }
    113       1.1       pk 
    114      1.22    perry static __inline void
    115       1.9  thorpej __cpu_simple_unlock(__cpu_simple_lock_t *alp)
    116       1.1       pk {
    117       1.2       pk 
    118      1.12       pk 	/*
    119      1.13       pk 	 * Insert compiler barrier to prevent instruction re-ordering
    120      1.13       pk 	 * around the lock release.
    121      1.12       pk 	 */
    122      1.13       pk 	__insn_barrier();
    123      1.13       pk 	*alp = __SIMPLELOCK_UNLOCKED;
    124       1.1       pk }
    125       1.1       pk 
    126      1.24       ad #if defined(__sparc_v9__)
    127      1.25       ad static __inline void
    128      1.24       ad mb_read(void)
    129      1.24       ad {
    130      1.24       ad 	__asm __volatile("membar #LoadLoad" : : : "memory");
    131      1.24       ad }
    132      1.24       ad 
    133      1.25       ad static __inline void
    134      1.24       ad mb_write(void)
    135      1.24       ad {
    136      1.24       ad 	__asm __volatile("" : : : "memory");
    137      1.24       ad }
    138      1.24       ad 
    139      1.25       ad static __inline void
    140      1.24       ad mb_memory(void)
    141      1.24       ad {
    142      1.24       ad 	__asm __volatile("membar #MemIssue" : : : "memory");
    143      1.24       ad }
    144      1.24       ad #else	/* __sparc_v9__ */
    145      1.25       ad static __inline void
    146      1.24       ad mb_read(void)
    147      1.24       ad {
    148      1.24       ad 	static volatile int junk;
    149  1.27.2.1     yamt 	__asm volatile("st %%g0,[%0]"
    150  1.27.2.1     yamt 	    :
    151  1.27.2.1     yamt 	    : "r" (&junk)
    152  1.27.2.1     yamt 	    : "memory");
    153      1.24       ad }
    154      1.24       ad 
    155      1.25       ad static __inline void
    156      1.24       ad mb_write(void)
    157      1.24       ad {
    158      1.24       ad 	__insn_barrier();
    159      1.24       ad }
    160      1.24       ad 
    161      1.25       ad static __inline void
    162      1.24       ad mb_memory(void)
    163      1.24       ad {
    164      1.27       ad 	mb_read();
    165      1.24       ad }
    166      1.24       ad #endif	/* __sparc_v9__ */
    167      1.24       ad 
    168       1.1       pk #endif /* _MACHINE_LOCK_H */
    169