/*	$NetBSD: lock.h,v 1.27.12.1 2008/03/24 07:15:06 keiichi Exp $	*/

/*
 * Copyright (c) 2000 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *     This product includes software developed at Ludd, University of Luleå.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VAX_LOCK_H_
#define _VAX_LOCK_H_

#ifdef _KERNEL
#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include <machine/intr.h>
#endif
#include <machine/cpu.h>
#endif

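/*
 * Simple (spin) lock primitives.  The helpers below examine or set the
 * lock word directly, without an interlocked instruction; the atomic
 * operations follow further down.
 */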
static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

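/*
 * Initialize a lock by releasing it: the kernel version jumps to the
 * out-of-line Sunlock routine, the userland version clears bit 0 of the
 * lock word with an interlocked BBCCI.
 */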
static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *__alp)
{
#ifdef _KERNEL
	__asm __volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	__asm __volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}

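/*
 * Try to take the lock without spinning.  The kernel version calls the
 * out-of-line Slocktry routine and returns its result from %r0; the
 * userland version uses BBSSI (branch on bit set and set, interlocked)
 * and returns 1 if bit 0 was previously clear (lock acquired) or 0 if
 * it was already set.
 */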
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *);
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *__alp)
{
	int ret;

#ifdef _KERNEL
	__asm __volatile ("movl %1,%%r1;jsb Slocktry;movl %%r0,%0"
		: "=&r"(ret)
		: "g"(__alp)
		: "r0","r1","cc","memory");
#else
	__asm __volatile ("clrl %0;bbssi $0,%1,1f;incl %0;1:"
		: "=&r"(ret)
		: "m"(*__alp)
		: "cc");
#endif

	return ret;
}

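/*
 * Acquire the lock, spinning until it is available.  On MULTIPROCESSOR
 * kernels, console-character and DDB IPIs are serviced while spinning so
 * that a CPU busy-waiting on the lock does not deadlock against them;
 * the uniprocessor/userland version simply spins on an interlocked BBSSI.
 */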
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
#if defined(_KERNEL) && defined(MULTIPROCESSOR)
	struct cpu_info * const __ci = curcpu();

	while (__cpu_simple_lock_try(__alp) == 0) {
#define	VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB))
		if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) {
			cpu_handle_ipi();
		}
	}
#else /* _KERNEL && MULTIPROCESSOR */
	__asm __volatile ("1:bbssi $0,%0,1b"
		: /* No outputs */
		: "m"(*__alp)
		: "cc");
#endif /* _KERNEL && MULTIPROCESSOR */
}

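/*
 * Release the lock: the kernel version jumps to the out-of-line Sunlock
 * routine, the userland version clears bit 0 with an interlocked BBCCI.
 */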
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *__alp)
{
#ifdef _KERNEL
	__asm __volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	__asm __volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}

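/*
 * Illustrative sketch of typical use of the primitives above, with a
 * hypothetical lock word "example_lock" protecting a critical section:
 *
 *	static __cpu_simple_lock_t example_lock;
 *
 *	__cpu_simple_lock_init(&example_lock);
 *	...
 *	__cpu_simple_lock(&example_lock);
 *	... critical section ...
 *	__cpu_simple_unlock(&example_lock);
 *
 *	if (__cpu_simple_lock_try(&example_lock) != 0) {
 *		... lock acquired without spinning ...
 *		__cpu_simple_unlock(&example_lock);
 *	}
 */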
#if defined(MULTIPROCESSOR)
/*
 * On the VAX, interprocessor interrupts can come in at device priority
 * level or lower.  This can cause problems while waiting for r/w
 * spinlocks from a fairly high priority level: IPIs that come in will
 * not be processed, which can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.
 */
#define SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info * const __ci = curcpu();			\
									\
	if (__ci->ci_ipimsgs != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
		    __ci->ci_cpuid); */					\
		cpu_handle_ipi();					\
	}								\
} while (/*CONSTCOND*/0)
#endif /* MULTIPROCESSOR */

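/*
 * Memory barrier hooks.  These are empty on this port; presumably no
 * explicit ordering instruction is required here on the VAX.
 */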
static __inline void mb_read(void);
static __inline void
mb_read(void)
{
}

static __inline void mb_write(void);
static __inline void
mb_write(void)
{
}
#endif /* _VAX_LOCK_H_ */