/*	$NetBSD: lock.h,v 1.13.2.4 2005/11/10 13:59:59 skrll Exp $	*/

/*
 * Copyright (c) 2000 Ludd, University of Luleå, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *     This product includes software developed at Ludd, University of Luleå.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VAX_LOCK_H_
#define _VAX_LOCK_H_

#ifdef _KERNEL
#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include <machine/intr.h>
#endif
#include <machine/cpu.h>
#endif

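/*
 * Release/initialize a simple lock.  The kernel path passes the lock
 * address to the Sunlock routine in r1; the user path clears bit 0 of
 * the lock word with the interlocked BBCCI instruction.
 */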
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{
#ifdef _KERNEL
	__asm__ __volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(alp)
		: "r1","cc","memory");
#else
	__asm__ __volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*alp)
		: "cc");
#endif
}

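/*
 * Try once to acquire a simple lock; returns nonzero on success.  The
 * kernel path calls the Slocktry routine and returns its result from r0;
 * the user path sets bit 0 with the interlocked BBSSI instruction and
 * fails if the bit was already set.
 */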
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	int ret;

#ifdef _KERNEL
	__asm__ __volatile ("movl %1,%%r1;jsb Slocktry;movl %%r0,%0"
		: "=&r"(ret)
		: "g"(alp)
		: "r0","r1","cc","memory");
#else
	__asm__ __volatile ("clrl %0;bbssi $0,%1,1f;incl %0;1:"
		: "=&r"(ret)
		: "m"(*alp)
		: "cc");
#endif

	return ret;
}

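/*
 * Acquire a simple lock, spinning until it is available.  While spinning,
 * the kernel services the IPIs selected by VAX_LOCK_CHECKS (console
 * character and DDB requests) so those requests do not stall behind the
 * spin; the user-space variant below simply spins on BBSSI.
 */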
#ifdef _KERNEL
#define	VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB))
#define	__cpu_simple_lock(alp)						\
do {									\
	struct cpu_info *__ci = curcpu();				\
									\
	while (__cpu_simple_lock_try(alp) == 0) {			\
		int ___s;						\
									\
		if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) {		\
			___s = splipi();				\
			cpu_handle_ipi();				\
			splx(___s);					\
		}							\
	}								\
} while (0)
#else
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	__asm__ __volatile ("1:bbssi $0,%0,1b"
		: /* No outputs */
		: "m"(*alp)
		: "cc");
}
#endif /* _KERNEL */

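/*
 * Disabled (#if 0) alternative implementation of __cpu_simple_lock.
 */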
#if 0
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	struct cpu_info *ci = curcpu();

	while (__cpu_simple_lock_try(alp) == 0) {
		int s;

		if (ci->ci_ipimsgs & IPI_SEND_CNCHAR) {
			s = splipi();
			cpu_handle_ipi();
			splx(s);
		}
	}

#if 0
	__asm__ __volatile ("movl %0,%%r1;jsb Slock"
		: /* No output */
		: "g"(alp)
		: "r0","r1","cc","memory");
#endif
#if 0
	__asm__ __volatile ("1:;bbssi $0, %0, 1b"
		: /* No output */
		: "m"(*alp));
#endif
}
#endif

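/*
 * Release a simple lock; this uses the same code paths as
 * __cpu_simple_lock_init() above.
 */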
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{
#ifdef _KERNEL
	__asm__ __volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(alp)
		: "r1","cc","memory");
#else
	__asm__ __volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*alp)
		: "cc");
#endif
}

#if defined(MULTIPROCESSOR)
/*
 * On the VAX, interprocessor interrupts can come in at device priority
 * level or lower.  This can cause problems while waiting for r/w
 * spinlocks from a fairly high priority level: IPIs that come in will
 * not be processed, which can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.  (A usage sketch follows the definition below.)
 */
#define SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info *__ci = curcpu();				\
	int ___s;							\
									\
	if (__ci->ci_ipimsgs != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
		    __ci->ci_cpuid); */					\
		___s = splipi();					\
		cpu_handle_ipi();					\
		splx(___s);						\
	}								\
} while (0)
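
/*
 * Illustrative sketch only (kept disabled): a caller that busy-waits on a
 * lock would invoke SPINLOCK_SPIN_HOOK on every failed attempt so pending
 * IPIs are still serviced while spinning.  The name example_spin_acquire
 * is hypothetical and made up for this example.
 */
#if 0
static __inline void
example_spin_acquire(__cpu_simple_lock_t *alp)
{

	while (__cpu_simple_lock_try(alp) == 0)
		SPINLOCK_SPIN_HOOK;	/* drain pending IPIs while waiting */
}
#endif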
#endif /* MULTIPROCESSOR */
#endif /* _VAX_LOCK_H_ */