/*	$NetBSD: pthread_lock.c,v 1.36 2022/04/10 10:38:33 riastradh Exp $	*/

/*-
 * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * libpthread internal spinlock routines.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_lock.c,v 1.36 2022/04/10 10:38:33 riastradh Exp $");

/* Need to use libc-private names for atomic operations. */
#include "../../common/lib/libc/atomic/atomic_op_namespace.h"

#include <sys/types.h>
#include <sys/ras.h>

#include <machine/lock.h>

#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

#include "pthread.h"
#include "pthread_int.h"

/* How many times to try acquiring spin locks on MP systems. */
#define	PTHREAD__NSPINS		64

RAS_DECL(pthread__lock);

static void	pthread__spinlock_slow(pthread_spin_t *);

#ifdef PTHREAD__ASM_RASOPS

void	pthread__ras_simple_lock_init(__cpu_simple_lock_t *);
int	pthread__ras_simple_lock_try(__cpu_simple_lock_t *);
void	pthread__ras_simple_unlock(__cpu_simple_lock_t *);

#else

static void
pthread__ras_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_clear(alp);
}

static int
pthread__ras_simple_lock_try(__cpu_simple_lock_t *alp)
{
	int locked;

	RAS_START(pthread__lock);
	locked = __SIMPLELOCK_LOCKED_P(alp);
	__cpu_simple_lock_set(alp);
	RAS_END(pthread__lock);

	return !locked;
}

static void
pthread__ras_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_clear(alp);
}

#endif /* PTHREAD__ASM_RASOPS */

static const struct pthread_lock_ops pthread__lock_ops_ras = {
	pthread__ras_simple_lock_init,
	pthread__ras_simple_lock_try,
	pthread__ras_simple_unlock,
	pthread__spinlock_slow,
};

static void
pthread__atomic_simple_lock_init(__cpu_simple_lock_t *alp)
{

	__cpu_simple_lock_init(alp);
}

static int
pthread__atomic_simple_lock_try(__cpu_simple_lock_t *alp)
{

	return (__cpu_simple_lock_try(alp));
}

static void
pthread__atomic_simple_unlock(__cpu_simple_lock_t *alp)
{

	__cpu_simple_unlock(alp);
}

static const struct pthread_lock_ops pthread__lock_ops_atomic = {
	pthread__atomic_simple_lock_init,
	pthread__atomic_simple_lock_try,
	pthread__atomic_simple_unlock,
	pthread__spinlock_slow,
};

/*
 * We default to pointing to the RAS primitives; we might need to use
 * locks early, but before main() starts.  This is safe, since no other
 * threads will be active for the process, so atomicity will not be
 * required.
 */
const struct pthread_lock_ops *pthread__lock_ops = &pthread__lock_ops_ras;
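/*
 * Illustrative sketch, not part of the original file and excluded from
 * the build with #if 0: why the RAS try-lock above is safe on a
 * uniprocessor even though it is a plain load followed by a plain
 * store.  rasctl(2) registers the address range between RAS_START and
 * RAS_END with the kernel; if the thread is preempted inside that
 * window, the kernel restarts it at RAS_START, so the load+store pair
 * executes as one atomic test-and-set.  The function and parameter
 * names below are hypothetical.
 */
#if 0
static int
ras_style_test_and_set(volatile int *lockword)
{
	int locked;

	/* RAS_START: restart point if preempted before the store. */
	locked = *lockword;	/* observe the old value */
	*lockword = 1;		/* mark the lock held */
	/* RAS_END: past this point the sequence has committed. */

	return !locked;		/* nonzero on success, as plo_try expects */
}
#endif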
/*
 * Prevent this routine from being inlined.  The common case is no
 * contention and it's better to not burden the instruction decoder.
 */
static void
pthread__spinlock_slow(pthread_spin_t *lock)
{
	pthread_t self;
	int count;

	self = pthread__self();

	do {
		count = pthread__nspins;
		while (__SIMPLELOCK_LOCKED_P(lock) && --count > 0)
			pthread__smt_pause();
		if (count > 0) {
			if ((*self->pt_lockops.plo_try)(lock))
				break;
			continue;
		}
		sched_yield();
	} while (/*CONSTCOND*/ 1);
}

/*
 * Initialize the locking primitives.  On uniprocessors, we always
 * use Restartable Atomic Sequences if they are available.  Otherwise,
 * we fall back onto machine-dependent atomic lock primitives.
 */
void
pthread__lockprim_init(void)
{
	char *p;

	if ((p = pthread__getenv("PTHREAD_NSPINS")) != NULL)
		pthread__nspins = atoi(p);
	else if (pthread__concurrency != 1)
		pthread__nspins = PTHREAD__NSPINS;
	else
		pthread__nspins = 1;

	if (pthread__concurrency != 1) {
		pthread__lock_ops = &pthread__lock_ops_atomic;
		return;
	}

	if (rasctl(RAS_ADDR(pthread__lock), RAS_SIZE(pthread__lock),
	    RAS_INSTALL) != 0) {
		pthread__lock_ops = &pthread__lock_ops_atomic;
		return;
	}
}

void
pthread_lockinit(pthread_spin_t *lock)
{

	pthread__simple_lock_init(lock);
}
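/*
 * Illustrative sketch, not part of the original file and excluded from
 * the build with #if 0: the spin-then-yield strategy of
 * pthread__spinlock_slow() restated with standalone C11 atomics.  All
 * names here (demo_lock, demo_nspins, demo_spin_lock, demo_spin_unlock)
 * are hypothetical.  The structure mirrors the slow path above: poll
 * the lock word read-only for a bounded number of spins, attempt the
 * atomic acquire when it looks free, and fall back to sched_yield()
 * once the spin budget runs out.
 */
#if 0
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool demo_lock;
static int demo_nspins = 64;		/* cf. PTHREAD__NSPINS */

static void
demo_spin_lock(void)
{
	int count;

	/* Uncontended acquire: one atomic exchange. */
	while (atomic_exchange_explicit(&demo_lock, true,
	    memory_order_acquire)) {
		/* Contended: poll read-only, as the slow path does. */
		count = demo_nspins;
		while (atomic_load_explicit(&demo_lock,
		    memory_order_relaxed) && --count > 0)
			continue;	/* a CPU pause hint belongs here */
		/* Spin budget exhausted: let other threads run. */
		if (count == 0)
			sched_yield();
	}
}

static void
demo_spin_unlock(void)
{

	atomic_store_explicit(&demo_lock, false, memory_order_release);
}
#endif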