/* $NetBSD: lock.h,v 1.23 2007/02/09 21:55:01 ad Exp $ */

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */
#ifndef _ALPHA_LOCK_H_
#define	_ALPHA_LOCK_H_

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#endif

static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__asm volatile(
		"# BEGIN __cpu_simple_lock_init\n"
		"	stl	$31, %0		\n"
		"	mb			\n"
		"	# END __cpu_simple_lock_init"
		: "=m" (*alp));
}

static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *alp)
{
	unsigned long t0;

	/*
	 * Note, if we detect that the lock is held when
	 * we do the initial load-locked, we spin using
	 * a non-locked load to save the coherency logic
	 * some work.
	 */

	__asm volatile(
		"# BEGIN __cpu_simple_lock\n"
		"1:	ldl_l	%0, %3		\n"
		"	bne	%0, 2f		\n"
		"	bis	$31, %2, %0	\n"
		"	stl_c	%0, %1		\n"
		"	beq	%0, 3f		\n"
		"	mb			\n"
		"	br	4f		\n"
		"2:	ldl	%0, %3		\n"
		"	beq	%0, 1b		\n"
		"	br	2b		\n"
		"3:	br	1b		\n"
		"4:				\n"
		"	# END __cpu_simple_lock\n"
		: "=&r" (t0), "=m" (*alp)
		: "i" (__SIMPLELOCK_LOCKED), "m" (*alp)
		: "memory");
}

static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *alp)
{
	unsigned long t0, v0;

	__asm volatile(
		"# BEGIN __cpu_simple_lock_try\n"
		"1:	ldl_l	%0, %4		\n"
		"	bne	%0, 2f		\n"
		"	bis	$31, %3, %0	\n"
		"	stl_c	%0, %2		\n"
		"	beq	%0, 3f		\n"
		"	mb			\n"
		"	bis	$31, 1, %1	\n"
		"	br	4f		\n"
		"2:	bis	$31, $31, %1	\n"
		"	br	4f		\n"
		"3:	br	1b		\n"
		"4:				\n"
		"	# END __cpu_simple_lock_try"
		: "=&r" (t0), "=r" (v0), "=m" (*alp)
		: "i" (__SIMPLELOCK_LOCKED), "m" (*alp)
		: "memory");

	return (v0 != 0);
}

static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *alp)
{

	__asm volatile(
		"# BEGIN __cpu_simple_unlock\n"
		"	mb			\n"
		"	stl	$31, %0		\n"
		"	# END __cpu_simple_unlock"
		: "=m" (*alp));
}
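/*
 * Illustrative only, and not part of the machine-dependent lock interface:
 * the LL/SC loop in __cpu_simple_lock() above is a "test-and-test-and-set"
 * acquire.  A rough C-level equivalent, built on __cpu_simple_lock_try(),
 * is sketched below.  The real routine must stay in assembly so that the
 * ldl_l/stl_c pair and the acquiring "mb" are emitted exactly as written;
 * the function name here is invented for the example, and the inner loop
 * assumes __cpu_simple_lock_t is volatile-qualified so the plain load is
 * re-issued on every iteration.
 */
static __inline void
__cpu_simple_lock_sketch(__cpu_simple_lock_t *alp)
{

	while (__cpu_simple_lock_try(alp) == 0) {
		/*
		 * While the lock is held, spin with plain (non-locked)
		 * loads; 0 is the unlocked value, as the "stl $31" in
		 * __cpu_simple_unlock() shows.  Assumes the lock type is
		 * volatile-qualified.
		 */
		while (*alp != 0)
			continue;
	}
}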
#if defined(MULTIPROCESSOR)
/*
 * On the Alpha, interprocessor interrupts come in at device priority
 * level.  This can cause some problems while waiting for r/w spinlocks
 * from a high'ish priority level: IPIs that come in will not be processed.
 * This can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.
 */
#define	SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info *__ci = curcpu();				\
	int __s;							\
									\
	if (__ci->ci_ipis != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
		   __ci->ci_cpuid); */					\
		__s = splipi();						\
		alpha_ipi_process(__ci, NULL);				\
		splx(__s);						\
	}								\
} while (0)
#define	SPINLOCK_BACKOFF_HOOK	(void)nullop((void *)0)
#endif /* MULTIPROCESSOR */

static inline void
mb_read(void)
{
	__asm __volatile("mb" : : : "memory");
}

static inline void
mb_write(void)
{
	/* XXX wmb */
	__asm __volatile("mb" : : : "memory");
}

static inline void
mb_memory(void)
{
	__asm __volatile("mb" : : : "memory");
}

#endif /* _ALPHA_LOCK_H_ */
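/*
 * Purely illustrative and deliberately not compiled: SPINLOCK_SPIN_HOOK
 * above is intended to be invoked from machine-independent busy-wait loops,
 * roughly as sketched below, so that pending IPIs are serviced while a CPU
 * spins at raised IPL.  The function name and the shape of the loop are
 * invented for this example.
 */
#if 0	/* example only */
static void
example_spin_wait(__cpu_simple_lock_t *lock)
{

	while (__cpu_simple_lock_try(lock) == 0) {
#if defined(MULTIPROCESSOR)
		SPINLOCK_SPIN_HOOK;	/* let pending IPIs be processed */
#endif
	}
}
#endif	/* example only */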