/*	$NetBSD: lock.h,v 1.3 2003/09/20 22:28:19 matt Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and Matthew Fredette.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */

#ifndef _HPPA_LOCK_H_
#define	_HPPA_LOCK_H_

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#endif

#ifdef	MULTIPROCESSOR

/*
 * Semaphores must be aligned on 16-byte boundaries on the PA-RISC,
 * because the ldcw (load and clear word) instruction requires a
 * 16-byte aligned operand.
 */
typedef __volatile struct {
	int32_t sem __attribute__ ((aligned (16)));
} __cpu_simple_lock_t;

/*
 * ldcw always stores zero into the lock word, so in the MULTIPROCESSOR
 * case "locked" must be the zero value and "unlocked" a non-zero value.
 * The routines below use __SIMPLELOCK_UNLOCKED, so define both values
 * here.  (The uniprocessor definitions below are not constrained by
 * ldcw.)
 */
#define	__SIMPLELOCK_LOCKED	0
#define	__SIMPLELOCK_UNLOCKED	1

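/*
 * Illustrative compile-time check (a sketch added for clarity; the
 * typedef name below is not used anywhere else): since ldcw requires a
 * 16-byte aligned operand, the lock type itself must carry 16-byte
 * alignment.  The negative array size makes the build fail if the
 * aligned attribute above is ever lost.
 */
typedef char __hppa_simple_lock_align_check
    [__alignof__(__cpu_simple_lock_t) >= 16 ? 1 : -1];
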
/*
 * Initialize the lock by storing the unlocked value into the lock word.
 */
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{
	__asm __volatile(
		"	; BEGIN __cpu_simple_lock_init\n"
		"	stw	%1, %0		\n"
		"	sync			\n"
		"	; END __cpu_simple_lock_init"
		: "=m" (alp->sem)
		: "r" (__SIMPLELOCK_UNLOCKED));
}

static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	int32_t t0;

	/*
	 * Note: if the initial load-and-clear finds the lock held, we
	 * spin using ordinary (non-locking) loads until the lock looks
	 * free, to save the coherency logic some work, and only then
	 * retry the atomic load-and-clear.  t0 is an early-clobber
	 * output because it is written before the lock word is re-read.
	 */

	__asm __volatile(
		"	; BEGIN __cpu_simple_lock\n"
		"	ldcw		%1, %0		\n"
		"	comb,<>,n	%%r0,%0, 2f	\n"
		"1:	comb,=,n	%%r0,%0, 1b	\n"
		"	ldw		%1, %0		\n"
		"	ldcw		%1, %0		\n"
		"	comb,=,n	%%r0,%0, 1b	\n"
		"	ldw		%1, %0		\n"
		"2:	sync				\n"
		"	; END __cpu_simple_lock\n"
		: "=&r" (t0), "+m" (alp->sem));
}
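
/*
 * C-level sketch of the acquire sequence above (illustrative only; the
 * real code must stay in assembly so that the load-and-clear is a single
 * atomic ldcw, and __ldcw() below is a hypothetical helper standing in
 * for that instruction):
 *
 *	while (__ldcw(&alp->sem) == __SIMPLELOCK_LOCKED) {
 *		// Held: spin on plain loads until it looks free, then
 *		// go back and retry the atomic load-and-clear.
 *		while (alp->sem == __SIMPLELOCK_LOCKED)
 *			continue;
 *	}
 *	// The trailing sync keeps the critical section from starting
 *	// before the acquire is complete.
 */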

/*
 * Try to acquire the lock without spinning.  ldcw atomically stores
 * zero into the lock word and returns its previous contents, so a
 * non-zero result means the lock was free and the caller now holds it.
 */
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	int32_t t0;

	__asm __volatile(
		"	; BEGIN __cpu_simple_lock_try\n"
		"	ldcw		%1, %0		\n"
		"	sync				\n"
		"	; END __cpu_simple_lock_try"
		: "=r" (t0), "+m" (alp->sem));
	return (t0 != 0);
}

/*
 * Release the lock: order all prior accesses with sync, then store the
 * unlocked (non-zero) value back into the lock word.
 */
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{
	__asm __volatile(
		"	; BEGIN __cpu_simple_unlock\n"
		"	sync			\n"
		"	stw	%1, %0		\n"
		"	; END __cpu_simple_unlock"
		: "+m" (alp->sem)
		: "r" (__SIMPLELOCK_UNLOCKED));
}
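
/*
 * Typical use of these primitives (an illustrative sketch, not part of
 * the original header; "example_lock" is a made-up name):
 *
 *	static __cpu_simple_lock_t example_lock;
 *
 *	__cpu_simple_lock_init(&example_lock);
 *	...
 *	__cpu_simple_lock(&example_lock);	// spins until acquired
 *	... critical section ...
 *	__cpu_simple_unlock(&example_lock);
 *
 *	if (__cpu_simple_lock_try(&example_lock)) {	// non-blocking attempt
 *		... critical section ...
 *		__cpu_simple_unlock(&example_lock);
 *	}
 */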

#else	/* !MULTIPROCESSOR */

typedef	__volatile int		__cpu_simple_lock_t;

#define	__SIMPLELOCK_LOCKED	1
#define	__SIMPLELOCK_UNLOCKED	0

#endif	/* !MULTIPROCESSOR */

#endif /* _HPPA_LOCK_H_ */