/*	$NetBSD: lock.h,v 1.34 2022/02/13 13:42:21 riastradh Exp $	*/

/*
 * Copyright (c) 2000 Ludd, University of Lule}, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VAX_LOCK_H_
#define _VAX_LOCK_H_

#include <sys/param.h>

#ifdef _KERNEL
#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include <machine/intr.h>
#endif
#include <machine/cpu.h>
#endif

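/*
 * Non-interlocked helpers for the MI lock code: the _P predicates
 * just compare the lock word against __SIMPLELOCK_LOCKED or
 * __SIMPLELOCK_UNLOCKED, and _clear/_set store those values directly
 * without any atomic operation.
 */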
static __inline int
__SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_LOCKED;
}

static __inline int
__SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr)
{
	return *__ptr == __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}

static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}

static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *__alp)
{
	*__alp = __SIMPLELOCK_UNLOCKED;
}

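/*
 * Try once to acquire the lock; returns non-zero on success.
 * Under _HARDKERNEL (a full native kernel build) this jumps to the
 * out-of-line assembly stub Slocktry, passing the lock address in r1
 * and taking the result back in r0.  Other builds (e.g. rump kernels)
 * use BBSSI, the interlocked branch-on-bit-set-and-set instruction,
 * to test and set bit 0 of the lock word atomically.
 */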
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *);
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *__alp)
{
	int ret;

#ifdef _HARDKERNEL
	__asm __volatile ("movl %1,%%r1;jsb Slocktry;movl %%r0,%0"
		: "=&r"(ret)
		: "g"(__alp)
		: "r0","r1","cc","memory");
#else
	__asm __volatile ("clrl %0;bbssi $0,%1,1f;incl %0;1:"
		: "=&r"(ret)
		: "m"(*__alp)
		: "cc", "memory");
#endif

	return ret;
}

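/*
 * Acquire the lock, spinning until it becomes free.  In a
 * MULTIPROCESSOR native kernel the spin loop also polls ci_ipimsgs
 * and services console (IPI_SEND_CNCHAR) and DDB IPIs so that we do
 * not deadlock against the CPU sending them; see the comment above
 * SPINLOCK_SPIN_HOOK below.  Other builds simply spin on BBSSI.
 */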
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
#if defined(_HARDKERNEL) && defined(MULTIPROCESSOR)
	struct cpu_info * const __ci = curcpu();

	while (__cpu_simple_lock_try(__alp) == 0) {
#define	VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB))
		if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) {
			cpu_handle_ipi();
		}
	}
#else /* _HARDKERNEL && MULTIPROCESSOR */
	__asm __volatile ("1:bbssi $0,%0,1b"
		: /* No outputs */
		: "m"(*__alp)
		: "cc", "memory");
#endif /* _HARDKERNEL && MULTIPROCESSOR */
}

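/*
 * Release the lock.  Under _HARDKERNEL this jumps to the Sunlock
 * assembly stub (lock address in r1); other builds clear bit 0 of
 * the lock word with the interlocked BBCCI instruction.
 */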
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *__alp)
{
#ifdef _HARDKERNEL
	__asm __volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	__asm __volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc", "memory");
#endif
}

#if defined(MULTIPROCESSOR)
/*
 * On the VAX, interprocessor interrupts arrive at device interrupt
 * priority level or lower.  That is a problem when waiting for a r/w
 * spinlock at a higher priority level: IPIs that come in are not
 * processed, which can lead to deadlock.
 *
 * This hook allows pending IPIs to be processed while a spinlock's
 * interlock is released.
 */
#define SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info * const __ci = curcpu();			\
									\
	if (__ci->ci_ipimsgs != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
		    __ci->ci_cpuid); */					\
		cpu_handle_ipi();					\
	}								\
} while (/*CONSTCOND*/0)
#endif /* MULTIPROCESSOR */
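/*
 * Illustrative sketch only (not part of this header): MI code that
 * spins at raised IPL is expected to invoke the hook on each
 * iteration, roughly
 *
 *	while (!__cpu_simple_lock_try(lockp)) {
 *		SPINLOCK_SPIN_HOOK;
 *	}
 *
 * where "lockp" is a hypothetical __cpu_simple_lock_t pointer, so
 * that pending IPIs get a chance to be handled on every spin.
 */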

#endif /* _VAX_LOCK_H_ */