/*	lock.h,v 1.25.20.1 2007/11/06 23:23:11 matt Exp	*/

/*
 * Copyright (c) 2000 Ludd, University of Lule}, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *     This product includes software developed at Ludd, University of Lule}.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VAX_LOCK_H_
#define _VAX_LOCK_H_

#ifdef _KERNEL
#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include <machine/intr.h>
#endif
#include <machine/cpu.h>
#endif

/*
 * Non-interlocked tests and stores on the lock word.
 */
static __inline int
__SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

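/*
 * Illustrative sketch only, not part of this header's interface: a caller
 * spinning on a contended lock can use the non-interlocked predicate above
 * to poll cheaply and only retry the interlocked acquire once the lock
 * looks free ("test-and-test-and-set").  The function name is made up for
 * the example.
 *
 *	static void
 *	example_spin(__cpu_simple_lock_t *lp)
 *	{
 *		while (__cpu_simple_lock_try(lp) == 0) {
 *			while (__SIMPLELOCK_LOCKED_P(lp))
 *				continue;
 *		}
 *	}
 */
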
/*
 * Initialise the lock by releasing it.
 */
static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *__alp)
{
#ifdef _KERNEL
	/* Release via the Sunlock assembler routine. */
	__asm __volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	/* BBCCI: clear bit 0 of the lock word, interlocked. */
	__asm __volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}

/*
 * Try to acquire the lock; returns non-zero if the lock was obtained.
 */
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *);
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *__alp)
{
	int ret;

#ifdef _KERNEL
	/* The Slocktry assembler routine leaves its result in r0. */
	__asm __volatile ("movl %1,%%r1;jsb Slocktry;movl %%r0,%0"
		: "=&r"(ret)
		: "g"(__alp)
		: "r0","r1","cc","memory");
#else
	/* BBSSI: set bit 0 interlocked; ret becomes 1 only if the bit was clear. */
	__asm __volatile ("clrl %0;bbssi $0,%1,1f;incl %0;1:"
		: "=&r"(ret)
		: "m"(*__alp)
		: "cc");
#endif

	return ret;
}

/*
 * Acquire the lock, spinning until it is available.  On a MULTIPROCESSOR
 * kernel, console-character and DDB IPIs are serviced while spinning so
 * that those requests are not blocked behind the lock.
 */
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
#if defined(_KERNEL) && defined(MULTIPROCESSOR)
	struct cpu_info * const __ci = curcpu();

	while (__cpu_simple_lock_try(__alp) == 0) {
#define	VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB))
		if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) {
			cpu_handle_ipi();
		}
	}
#else /* _KERNEL && MULTIPROCESSOR */
	/* BBSSI loop: branch back while the bit is already set; fall through
	 * once the lock has been acquired. */
	__asm __volatile ("1:bbssi $0,%0,1b"
		: /* No outputs */
		: "m"(*__alp)
		: "cc");
#endif /* _KERNEL && MULTIPROCESSOR */
}

/*
 * Release the lock.
 */
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *__alp)
{
#ifdef _KERNEL
	/* Release via the Sunlock assembler routine. */
	__asm __volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	/* BBCCI: clear bit 0 of the lock word, interlocked. */
	__asm __volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}

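/*
 * Illustrative sketch only, not part of this header: the primitives above
 * pair up in the usual way.  The names "example_lock" and "example_count"
 * are made up for the example.
 *
 *	static __cpu_simple_lock_t example_lock;
 *	static int example_count;
 *
 *	static void
 *	example_setup(void)
 *	{
 *		__cpu_simple_lock_init(&example_lock);
 *	}
 *
 *	static void
 *	example_bump(void)
 *	{
 *		__cpu_simple_lock(&example_lock);
 *		example_count++;
 *		__cpu_simple_unlock(&example_lock);
 *	}
 */
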
#if defined(MULTIPROCESSOR)
/*
 * On the VAX, interprocessor interrupts can come in at device interrupt
 * priority level or lower.  This can cause problems while waiting for r/w
 * spinlocks from a relatively high priority level: IPIs that come in will
 * not be processed, which can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.
 */
#define SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info * const __ci = curcpu();			\
									\
	if (__ci->ci_ipimsgs != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
		    __ci->ci_cpuid); */					\
		cpu_handle_ipi();					\
	}								\
} while (/*CONSTCOND*/0)
#endif /* MULTIPROCESSOR */
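
/*
 * Illustrative sketch only: code that busy-waits at an elevated IPL can
 * invoke the hook on every iteration so that pending IPIs still get
 * serviced while it spins.  "example_flag" is a made-up variable.
 *
 *	while (example_flag == 0) {
 *		SPINLOCK_SPIN_HOOK;
 *	}
 */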

/*
 * Read/write memory barriers; they expand to nothing here.
 */
static __inline void mb_read(void);
static __inline void
mb_read(void)
{
}

static __inline void mb_write(void);
static __inline void
mb_write(void)
{
}
#endif /* _VAX_LOCK_H_ */