/*	$NetBSD: subr_pserialize.c,v 1.9 2017/11/21 08:49:14 ozaki-r Exp $	*/

/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Passive serialization.
 *
 * The implementation accurately matches the lapsed US patent 4809168,
 * therefore the code is patent-free in the United States.  Your use of
 * this code is at your own risk.
 *
 * Note for NetBSD developers: all changes to this source file must be
 * approved by the <core>.
 */
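
/*
 * Illustrative read-side sketch, not part of the implementation.  The
 * list `frob_list' and its element type are hypothetical; the update
 * side is sketched after pserialize_perform() below.
 *
 *	int s;
 *	struct frob *f;
 *
 *	s = pserialize_read_enter();
 *	LIST_FOREACH(f, &frob_list, f_entry) {
 *		// Read-only access to *f; the section must not sleep.
 *	}
 *	pserialize_read_exit(s);
 */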

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pserialize.c,v 1.9 2017/11/21 08:49:14 ozaki-r Exp $");

#include <sys/param.h>

#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/pserialize.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/xcall.h>

struct pserialize {
	TAILQ_ENTRY(pserialize)	psz_chain;	/* link on psz_queue[012] */
	lwp_t *			psz_owner;	/* LWP performing the update */
	kcpuset_t *		psz_target;	/* CPUs that must check in */
	kcpuset_t *		psz_pass;	/* CPUs that have checked in */
};

static u_int			psz_work_todo	__cacheline_aligned;
static kmutex_t			psz_lock	__cacheline_aligned;
static struct evcnt		psz_ev_excl	__cacheline_aligned;

/*
 * As defined in "Method 1" of the patent:
 * q0: "0 MP checkpoints have occurred".
 * q1: "1 MP checkpoint has occurred".
 * q2: "2 MP checkpoints have occurred".
 */
static TAILQ_HEAD(, pserialize)	psz_queue0	__cacheline_aligned;
static TAILQ_HEAD(, pserialize)	psz_queue1	__cacheline_aligned;
static TAILQ_HEAD(, pserialize)	psz_queue2	__cacheline_aligned;
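
/*
 * Lifetime of a request, as implemented below: pserialize_perform()
 * enqueues it on psz_queue0; each pserialize_switchpoint() marks the
 * current CPU in psz_pass, and once every CPU in psz_target has been
 * marked, the request advances psz_queue0 -> psz_queue1 -> psz_queue2.
 * Draining psz_queue2 zeroes psz_target, which releases the updater
 * spinning in pserialize_perform().
 */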

#ifdef LOCKDEBUG
#include <sys/percpu.h>

static percpu_t		*psz_debug_nreads	__cacheline_aligned;
#endif

/*
 * pserialize_init:
 *
 *	Initialize passive serialization structures.
 */
void
pserialize_init(void)
{

	psz_work_todo = 0;
	TAILQ_INIT(&psz_queue0);
	TAILQ_INIT(&psz_queue1);
	TAILQ_INIT(&psz_queue2);
	mutex_init(&psz_lock, MUTEX_DEFAULT, IPL_SCHED);
	evcnt_attach_dynamic(&psz_ev_excl, EVCNT_TYPE_MISC, NULL,
	    "pserialize", "exclusive access");
#ifdef LOCKDEBUG
	psz_debug_nreads = percpu_alloc(sizeof(uint32_t));
#endif
}

/*
 * pserialize_create:
 *
 *	Create and initialize a passive serialization object.
 */
pserialize_t
pserialize_create(void)
{
	pserialize_t psz;

	psz = kmem_zalloc(sizeof(struct pserialize), KM_SLEEP);
	kcpuset_create(&psz->psz_target, true);
	kcpuset_create(&psz->psz_pass, true);
	psz->psz_owner = NULL;

	return psz;
}

/*
 * pserialize_destroy:
 *
 *	Destroy a passive serialization object.
 */
void
pserialize_destroy(pserialize_t psz)
{

	KASSERT(psz->psz_owner == NULL);

	kcpuset_destroy(psz->psz_target);
	kcpuset_destroy(psz->psz_pass);
	kmem_free(psz, sizeof(struct pserialize));
}

/*
 * pserialize_perform:
 *
 *	Perform the write side of passive serialization.  The calling
 *	thread holds an exclusive lock on the data object(s) being updated.
 *	We wait until every processor in the system has made at least two
 *	passes through cpu_switchto().  The wait is made with the caller's
 *	update lock held, but is short term.
 */
void
pserialize_perform(pserialize_t psz)
{
	uint64_t xc;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	if (__predict_false(panicstr != NULL)) {
		return;
	}
	KASSERT(psz->psz_owner == NULL);
	KASSERT(ncpu > 0);

	/*
	 * Set up the object and put it onto the queue.  The lock
	 * activity here provides the necessary memory barrier to
	 * make the caller's data update completely visible to
	 * other processors.
	 */
	psz->psz_owner = curlwp;
	kcpuset_copy(psz->psz_target, kcpuset_running);
	kcpuset_zero(psz->psz_pass);

	mutex_spin_enter(&psz_lock);
	TAILQ_INSERT_TAIL(&psz_queue0, psz, psz_chain);
	psz_work_todo++;

	do {
		mutex_spin_exit(&psz_lock);

		/*
		 * Force some context switch activity on every CPU, as
		 * the system may not be busy.  Pause briefly so that we
		 * do not flood the system with cross-calls.
		 */
		xc = xc_broadcast(XC_HIGHPRI, (xcfunc_t)nullop, NULL, NULL);
		xc_wait(xc);
		kpause("psrlz", false, 1, NULL);

		mutex_spin_enter(&psz_lock);
	} while (!kcpuset_iszero(psz->psz_target));

	psz_ev_excl.ev_count++;
	mutex_spin_exit(&psz_lock);

	psz->psz_owner = NULL;
}
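
/*
 * Illustrative update-side sketch, not part of the implementation.
 * The object `f', list `frob_list', lock `frob_lock' and pserialize
 * object `frob_psz' are hypothetical:
 *
 *	mutex_enter(&frob_lock);
 *	LIST_REMOVE(f, f_entry);	// unlink; old readers may remain
 *	pserialize_perform(frob_psz);	// wait for those readers to drain
 *	mutex_exit(&frob_lock);
 *	kmem_free(f, sizeof(*f));	// now safe to free
 */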

int
pserialize_read_enter(void)
{
	int s;

	KASSERT(!cpu_intr_p());
	s = splsoftserial();
#ifdef LOCKDEBUG
	{
		uint32_t *nreads;
		nreads = percpu_getref(psz_debug_nreads);
		(*nreads)++;
		if (*nreads == 0)
			panic("nreads overflow");
		percpu_putref(psz_debug_nreads);
	}
#endif
	return s;
}

void
pserialize_read_exit(int s)
{

#ifdef LOCKDEBUG
	{
		uint32_t *nreads;
		nreads = percpu_getref(psz_debug_nreads);
		(*nreads)--;
		if (*nreads == UINT_MAX)
			panic("nreads underflow");
		percpu_putref(psz_debug_nreads);
	}
#endif
	splx(s);
}
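
/*
 * Read sections may be nested, as the per-CPU LOCKDEBUG counter above
 * implies; a minimal sketch:
 *
 *	int s1, s2;
 *
 *	s1 = pserialize_read_enter();
 *	s2 = pserialize_read_enter();	// nested read section
 *	...
 *	pserialize_read_exit(s2);
 *	pserialize_read_exit(s1);
 */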

/*
 * pserialize_switchpoint:
 *
 *	Monitor system context switch activity.  Called from machine
 *	independent code after mi_switch() returns.
 */
void
pserialize_switchpoint(void)
{
	pserialize_t psz, next;
	cpuid_t cid;

	/* We must ensure we never get here from inside a read section. */
	KASSERT(pserialize_not_in_read_section());

	/*
	 * If no updates pending, bail out.  No need to lock in order to
	 * test psz_work_todo; the only ill effect of missing an update
	 * would be to delay LWPs waiting in pserialize_perform().  That
	 * will not happen because updates are on the queue before an
	 * xcall is generated (serialization) to tickle every CPU.
	 */
	if (__predict_true(psz_work_todo == 0)) {
		return;
	}
	mutex_spin_enter(&psz_lock);
	cid = cpu_index(curcpu());

	/*
	 * First, scan through the second queue and update each request;
	 * if a request has passed all the processors, transfer it to
	 * the third queue.
	 */
	for (psz = TAILQ_FIRST(&psz_queue1); psz != NULL; psz = next) {
		next = TAILQ_NEXT(psz, psz_chain);
		kcpuset_set(psz->psz_pass, cid);
		if (!kcpuset_match(psz->psz_pass, psz->psz_target)) {
			continue;
		}
		kcpuset_zero(psz->psz_pass);
		TAILQ_REMOVE(&psz_queue1, psz, psz_chain);
		TAILQ_INSERT_TAIL(&psz_queue2, psz, psz_chain);
	}
	/*
	 * Scan through the first queue and update each request; if a
	 * request has passed all the processors, move it to the second
	 * queue.
	 */
	for (psz = TAILQ_FIRST(&psz_queue0); psz != NULL; psz = next) {
		next = TAILQ_NEXT(psz, psz_chain);
		kcpuset_set(psz->psz_pass, cid);
		if (!kcpuset_match(psz->psz_pass, psz->psz_target)) {
			continue;
		}
		kcpuset_zero(psz->psz_pass);
		TAILQ_REMOVE(&psz_queue0, psz, psz_chain);
		TAILQ_INSERT_TAIL(&psz_queue1, psz, psz_chain);
	}
	/*
	 * Process the third queue: the entries there have been seen twice
	 * on every processor, so remove them from the queue and notify
	 * the updating thread.
	 */
	while ((psz = TAILQ_FIRST(&psz_queue2)) != NULL) {
		TAILQ_REMOVE(&psz_queue2, psz, psz_chain);
		kcpuset_zero(psz->psz_target);
		psz_work_todo--;
	}
	mutex_spin_exit(&psz_lock);
}
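
/*
 * An illustrative sketch of the hook, per the header comment above:
 * the machine independent scheduler invokes this after a context
 * switch completes, roughly (not the actual code):
 *
 *	mi_switch(l);
 *	pserialize_switchpoint();
 */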

/*
 * pserialize_in_read_section:
 *
 *	True if the caller is in a pserialize read section.  To be used
 *	only for diagnostic assertions where we want to guarantee a
 *	condition like:
 *
 *		KASSERT(pserialize_in_read_section());
 */
bool
pserialize_in_read_section(void)
{
#ifdef LOCKDEBUG
	uint32_t *nreads;
	bool in;

	/* Not initialized yet */
	if (__predict_false(psz_debug_nreads == NULL))
		return true;

	nreads = percpu_getref(psz_debug_nreads);
	in = *nreads != 0;
	percpu_putref(psz_debug_nreads);

	return in;
#else
	return true;
#endif
}

/*
 * pserialize_not_in_read_section:
 *
 *	True if the caller is not in a pserialize read section.  To be
 *	used only for diagnostic assertions where we want to guarantee a
 *	condition like:
 *
 *		KASSERT(pserialize_not_in_read_section());
 */
bool
pserialize_not_in_read_section(void)
{
#ifdef LOCKDEBUG
	uint32_t *nreads;
	bool notin;

	/* Not initialized yet */
	if (__predict_false(psz_debug_nreads == NULL))
		return true;

	nreads = percpu_getref(psz_debug_nreads);
	notin = *nreads == 0;
	percpu_putref(psz_debug_nreads);

	return notin;
#else
	return true;
#endif
}