/*	$NetBSD: lock.h,v 1.6.12.1 2006/06/21 14:52:09 yamt Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and Matthew Fredette.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */

#ifndef _HPPA_LOCK_H_
#define	_HPPA_LOCK_H_

static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{
	/*
	 * Store the unlocked value, then issue "sync" so the
	 * initialized lock word is visible before any later access.
	 */
	__asm volatile(
		"	; BEGIN __cpu_simple_lock_init\n"
		"	stw	%1, %0		\n"
		"	sync			\n"
		"	; END __cpu_simple_lock_init"
		: "=m" (*alp)
		: "r" (__SIMPLELOCK_UNLOCKED));
}
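
/*
 * Illustrative sketch, not part of this header: what the init above
 * does, in plain C.  Because ldcw (used in the acquire paths below)
 * can only clear the lock word to zero, this port encodes "locked"
 * as zero, so __SIMPLELOCK_UNLOCKED is the non-zero value stored
 * here; the asm form is used to guarantee the store/sync ordering.
 * The name below is hypothetical.
 */
#if 0
static __inline void
__example_lock_init(__cpu_simple_lock_t *alp)
{
	*alp = __SIMPLELOCK_UNLOCKED;	/* non-zero on this port */
	/* ...followed by a memory barrier, the "sync" above */
}
#endif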

static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	int32_t t0;

	/*
	 * Note: if we detect that the lock is held when we do the
	 * initial load-clear-word, we spin using a non-locked load
	 * to save the coherency logic some work.  (A plain-C sketch
	 * of this test-and-test-and-set pattern follows the function.)
	 */

#if 0
	__asm volatile(
		"	; BEGIN __cpu_simple_lock\n"
		"	ldcw		%1, %0		\n"	/* atomically load old value, clear word */
		"	comb,<>,n	%%r0,%0, 2f	\n"	/* non-zero: lock was free, we own it */
		"1:	comb,=,n	%%r0,%0, 1b	\n"	/* still reads zero (held): keep spinning */
		"	ldw		%1, %0		\n"	/* ... using non-locked loads only */
		"	ldcw		%1, %0		\n"	/* looks free: retry the atomic grab */
		"	comb,=,n	%%r0,%0, 1b	\n"	/* lost the race: back to spinning */
		"	ldw		%1, %0		\n"
		"2:	sync				\n"	/* barrier once we own the lock */
		"	; END __cpu_simple_lock\n"
		: "=r" (t0), "+m" (*alp));
#else
	/*
	 * The ldcw sequence above is disabled, so acquiring the lock
	 * degenerates to a no-op; that is only safe on a
	 * uniprocessor kernel.
	 */
	t0 = 1;
#endif
}
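
/*
 * Illustrative sketch, not part of this header: the same
 * test-and-test-and-set strategy in plain C.  Spinning on ordinary
 * loads keeps the cache line shared instead of bouncing it; the
 * atomic load-and-clear is retried only once the lock looks free.
 * __example_load_and_clear() is hypothetical, standing in for ldcw.
 */
#if 0
static __inline void
__example_spin_acquire(__cpu_simple_lock_t *alp)
{
	while (__example_load_and_clear(alp) == 0) {
		/* Held: wait with plain loads until it reads non-zero. */
		while (*alp == 0)
			continue;
	}
	/* Memory barrier here, the "sync" above. */
}
#endif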

static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	int32_t t0;

#if 0
	__asm volatile(
		"	; BEGIN __cpu_simple_lock_try\n"
		"	ldcw		%1, %0		\n"	/* atomically load old value, clear word */
		"	sync				\n"	/* barrier if we got it */
		"	; END __cpu_simple_lock_try"
		: "=r" (t0), "+m" (*alp));
#else
	/* ldcw sequence disabled: the try always reports success. */
	t0 = 1;
#endif
	/* A non-zero old value means the word held UNLOCKED: we own it. */
	return (t0 != 0);
}
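
/*
 * Illustrative sketch, not part of this header: typical use of the
 * try variant to poll for a lock without spinning.  The names are
 * hypothetical.
 */
#if 0
static __cpu_simple_lock_t __example_lk;

static __inline int
__example_poll(void)
{
	if (!__cpu_simple_lock_try(&__example_lk))
		return (0);	/* contended: caller backs off */
	/* ...critical section... */
	__cpu_simple_unlock(&__example_lk);
	return (1);
}
#endif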

static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{
	/*
	 * Issue "sync" before the store, so every access made inside
	 * the critical section completes before the lock word reads
	 * unlocked again.
	 */
	__asm volatile(
		"	; BEGIN __cpu_simple_unlock\n"
		"	sync			\n"
		"	stw	%1, %0		\n"
		"	; END __cpu_simple_unlock"
		: "+m" (*alp)
		: "r" (__SIMPLELOCK_UNLOCKED));
}
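
/*
 * Illustrative sketch, not part of this header: the full lock
 * lifecycle using the primitives above.  Init runs once at setup;
 * acquire spins until the lock is owned; release makes the
 * critical-section stores visible before dropping the lock.  The
 * names are hypothetical.
 */
#if 0
static __cpu_simple_lock_t __example_lock;

static void
__example_setup(void)
{
	__cpu_simple_lock_init(&__example_lock);	/* once, at startup */
}

static void
__example_enter(void)
{
	__cpu_simple_lock(&__example_lock);	/* spins until owned */
	/* ...protected work... */
	__cpu_simple_unlock(&__example_lock);
}
#endif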

#endif /* _HPPA_LOCK_H_ */