linux_rcu.c revision 1.4 1 1.4 riastrad /* $NetBSD: linux_rcu.c,v 1.4 2021/12/19 11:49:11 riastradh Exp $ */
2 1.1 riastrad
3 1.1 riastrad /*-
4 1.1 riastrad * Copyright (c) 2018 The NetBSD Foundation, Inc.
5 1.1 riastrad * All rights reserved.
6 1.1 riastrad *
7 1.1 riastrad * This code is derived from software contributed to The NetBSD Foundation
8 1.1 riastrad * by Taylor R. Campbell.
9 1.1 riastrad *
10 1.1 riastrad * Redistribution and use in source and binary forms, with or without
11 1.1 riastrad * modification, are permitted provided that the following conditions
12 1.1 riastrad * are met:
13 1.1 riastrad * 1. Redistributions of source code must retain the above copyright
14 1.1 riastrad * notice, this list of conditions and the following disclaimer.
15 1.1 riastrad * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 riastrad * notice, this list of conditions and the following disclaimer in the
17 1.1 riastrad * documentation and/or other materials provided with the distribution.
18 1.1 riastrad *
19 1.1 riastrad * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.1 riastrad * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1 riastrad * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1 riastrad * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.1 riastrad * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1 riastrad * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1 riastrad * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1 riastrad * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1 riastrad * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1 riastrad * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1 riastrad * POSSIBILITY OF SUCH DAMAGE.
30 1.1 riastrad */
31 1.1 riastrad
32 1.1 riastrad #include <sys/cdefs.h>
33 1.4 riastrad __KERNEL_RCSID(0, "$NetBSD: linux_rcu.c,v 1.4 2021/12/19 11:49:11 riastradh Exp $");
34 1.1 riastrad
35 1.1 riastrad #include <sys/param.h>
36 1.1 riastrad #include <sys/types.h>
37 1.1 riastrad #include <sys/condvar.h>
38 1.1 riastrad #include <sys/cpu.h>
39 1.1 riastrad #include <sys/kthread.h>
40 1.1 riastrad #include <sys/mutex.h>
41 1.1 riastrad #include <sys/sdt.h>
42 1.1 riastrad #include <sys/xcall.h>
43 1.1 riastrad
44 1.1 riastrad #include <linux/rcupdate.h>
45 1.1 riastrad #include <linux/slab.h>
46 1.1 riastrad
/*
 * Static DTrace (SDT) probes for tracing RCU activity: grace-period
 * synchronization, rcu_barrier, and the queueing/execution/completion
 * of call_rcu callbacks and kfree_rcu frees.
 */
SDT_PROBE_DEFINE0(sdt, linux, rcu, synchronize__start);
SDT_PROBE_DEFINE1(sdt, linux, rcu, synchronize__cpu, "unsigned"/*cpu*/);
SDT_PROBE_DEFINE0(sdt, linux, rcu, synchronize__done);
SDT_PROBE_DEFINE0(sdt, linux, rcu, barrier__start);
SDT_PROBE_DEFINE0(sdt, linux, rcu, barrier__done);
SDT_PROBE_DEFINE2(sdt, linux, rcu, call__queue,
    "struct rcu_head *"/*head*/, "void (*)(struct rcu_head *)"/*callback*/);
SDT_PROBE_DEFINE2(sdt, linux, rcu, call__run,
    "struct rcu_head *"/*head*/, "void (*)(struct rcu_head *)"/*callback*/);
SDT_PROBE_DEFINE2(sdt, linux, rcu, call__done,
    "struct rcu_head *"/*head*/, "void (*)(struct rcu_head *)"/*callback*/);
SDT_PROBE_DEFINE2(sdt, linux, rcu, kfree__queue,
    "struct rcu_head *"/*head*/, "void *"/*obj*/);
SDT_PROBE_DEFINE2(sdt, linux, rcu, kfree__free,
    "struct rcu_head *"/*head*/, "void *"/*obj*/);
SDT_PROBE_DEFINE2(sdt, linux, rcu, kfree__done,
    "struct rcu_head *"/*head*/, "void *"/*obj*/);
64 1.1 riastrad
/*
 * Global state shared with the RCU garbage-collector thread: two
 * LIFO singly linked lists of pending rcu_head work, protected by
 * gc.lock, plus a generation counter that rcu_barrier uses to
 * detect completion of a batch.
 */
static struct {
	kmutex_t lock;		/* serializes all fields below */
	kcondvar_t cv;		/* new work queued, batch done, or dying */
	struct rcu_head *first_callback; /* head of call_rcu queue */
	struct rcu_head *first_kfree;	/* head of kfree_rcu queue */
	struct lwp *lwp;	/* the gc_thread kthread */
	uint64_t gen;		/* bumped after each completed batch */
	bool dying;		/* set by linux_rcu_gc_fini to stop the thread */
} gc __cacheline_aligned;
74 1.1 riastrad
/*
 * synchronize_rcu_xc(a, b)
 *
 *	Cross-call handler for synchronize_rcu.  Does no work beyond
 *	firing an SDT probe on the CPU it runs on; merely getting the
 *	cross-call scheduled on every CPU is what waits out pending
 *	RCU read sections.  Both arguments are unused.
 */
static void
synchronize_rcu_xc(void *a, void *b)
{

	SDT_PROBE1(sdt, linux, rcu, synchronize__cpu, cpu_index(curcpu()));
}
81 1.1 riastrad
/*
 * synchronize_rcu()
 *
 *	Wait for any pending RCU read section on every CPU to complete
 *	by triggering on every CPU activity that is blocked by an RCU
 *	read section.
 *
 *	May sleep.  (Practically guaranteed to sleep!)
 */
void
synchronize_rcu(void)
{

	SDT_PROBE0(sdt, linux, rcu, synchronize__start);
	/*
	 * Broadcast a cross-call to every CPU and wait for all of them
	 * to run it; completion everywhere implies no RCU read section
	 * that preceded this call is still in progress.
	 */
	xc_wait(xc_broadcast(0, &synchronize_rcu_xc, NULL, NULL));
	SDT_PROBE0(sdt, linux, rcu, synchronize__done);
}
99 1.1 riastrad
/*
 * synchronize_rcu_expedited()
 *
 *	Wait for any pending RCU read section on every CPU to complete
 *	by triggering on every CPU activity that is blocked by an RCU
 *	read section.  Try to get an answer faster than
 *	synchronize_rcu, at the cost of more activity triggered on
 *	other CPUs.
 *
 *	May sleep.  (Practically guaranteed to sleep!)
 *
 *	No expedited path is implemented here; this simply forwards to
 *	synchronize_rcu, which satisfies the (stronger-than-needed)
 *	contract.
 */
void
synchronize_rcu_expedited(void)
{

	synchronize_rcu();
}
117 1.4 riastrad
/*
 * cookie = get_state_synchronize_rcu(), cond_synchronize_rcu(cookie)
 *
 *	Optimization for synchronize_rcu -- skip if it has already
 *	happened between get_state_synchronize_rcu and
 *	cond_synchronize_rcu.  get_state_synchronize_rcu implies a full
 *	SMP memory barrier (membar_sync).
 *
 *	This implementation does not track grace periods, so the
 *	cookie is always 0 and cond_synchronize_rcu always does a full
 *	synchronize_rcu -- correct, just never able to skip.
 */
unsigned long
get_state_synchronize_rcu(void)
{

	membar_sync();
	return 0;
}
133 1.3 riastrad
void
cond_synchronize_rcu(unsigned long cookie)
{

	/* Cookie is ignored; unconditionally wait for a grace period. */
	synchronize_rcu();
}
140 1.3 riastrad
/*
 * rcu_barrier()
 *
 *	Wait for all pending RCU callbacks to complete.
 *
 *	Does not imply, and is not implied by, synchronize_rcu.
 */
void
rcu_barrier(void)
{
	uint64_t gen;

	SDT_PROBE0(sdt, linux, rcu, barrier__start);
	mutex_enter(&gc.lock);
	if (gc.first_callback != NULL || gc.first_kfree != NULL) {
		/*
		 * Work is queued.  gc_thread bumps gc.gen and
		 * broadcasts on gc.cv after finishing each batch, so
		 * wait until the generation changes.
		 */
		gen = gc.gen;
		do {
			cv_wait(&gc.cv, &gc.lock);
		} while (gc.gen == gen);
	}
	/*
	 * NOTE(review): if gc_thread has already taken both lists and
	 * dropped gc.lock to run a batch, both list heads are NULL
	 * here while callbacks are still in flight, and we return
	 * without waiting for them.  Looks like a race against the
	 * stated contract -- confirm whether a "batch in progress"
	 * flag in gc is needed.
	 */
	mutex_exit(&gc.lock);
	SDT_PROBE0(sdt, linux, rcu, barrier__done);
}
164 1.1 riastrad
165 1.1 riastrad /*
166 1.1 riastrad * call_rcu(head, callback)
167 1.1 riastrad *
168 1.1 riastrad * Arrange to call callback(head) after any pending RCU read
169 1.1 riastrad * sections on every CPU is complete. Return immediately.
170 1.1 riastrad */
171 1.1 riastrad void
172 1.1 riastrad call_rcu(struct rcu_head *head, void (*callback)(struct rcu_head *))
173 1.1 riastrad {
174 1.1 riastrad
175 1.1 riastrad head->rcuh_u.callback = callback;
176 1.1 riastrad
177 1.1 riastrad mutex_enter(&gc.lock);
178 1.1 riastrad head->rcuh_next = gc.first_callback;
179 1.1 riastrad gc.first_callback = head;
180 1.1 riastrad cv_broadcast(&gc.cv);
181 1.1 riastrad SDT_PROBE2(sdt, linux, rcu, call__queue, head, callback);
182 1.1 riastrad mutex_exit(&gc.lock);
183 1.1 riastrad }
184 1.1 riastrad
185 1.1 riastrad /*
186 1.1 riastrad * _kfree_rcu(head, obj)
187 1.1 riastrad *
188 1.1 riastrad * kfree_rcu helper: schedule kfree(obj) using head for storage.
189 1.1 riastrad */
190 1.1 riastrad void
191 1.1 riastrad _kfree_rcu(struct rcu_head *head, void *obj)
192 1.1 riastrad {
193 1.1 riastrad
194 1.1 riastrad head->rcuh_u.obj = obj;
195 1.1 riastrad
196 1.1 riastrad mutex_enter(&gc.lock);
197 1.1 riastrad head->rcuh_next = gc.first_kfree;
198 1.1 riastrad gc.first_kfree = head;
199 1.1 riastrad cv_broadcast(&gc.cv);
200 1.1 riastrad SDT_PROBE2(sdt, linux, rcu, kfree__queue, head, obj);
201 1.1 riastrad mutex_exit(&gc.lock);
202 1.1 riastrad }
203 1.1 riastrad
/*
 * gc_thread(cookie)
 *
 *	Kernel thread servicing call_rcu and kfree_rcu: repeatedly
 *	claim both pending lists under gc.lock, wait for a grace
 *	period with synchronize_rcu, then run the callbacks and free
 *	the objects with the lock dropped.  Exits via kthread_exit
 *	once gc.dying is set and no work remains.
 */
static void
gc_thread(void *cookie)
{
	struct rcu_head *head_callback, *head_kfree, *head, *next;

	mutex_enter(&gc.lock);
	for (;;) {
		/* Start with no work. */
		bool work = false;

		/* Grab the list of callbacks. */
		if ((head_callback = gc.first_callback) != NULL) {
			gc.first_callback = NULL;
			work = true;
		}

		/* Grab the list of objects to kfree. */
		if ((head_kfree = gc.first_kfree) != NULL) {
			gc.first_kfree = NULL;
			work = true;
		}

		/*
		 * If no work, then either stop, if we're dying, or
		 * wait for work, if not.
		 */
		if (!work) {
			if (gc.dying)
				break;
			cv_wait(&gc.cv, &gc.lock);
			continue;
		}

		/* We have work to do.  Drop the lock to do it. */
		mutex_exit(&gc.lock);

		/* Wait for activity on all CPUs. */
		synchronize_rcu();

		/* Call the callbacks. */
		for (head = head_callback; head != NULL; head = next) {
			void (*callback)(struct rcu_head *) =
			    head->rcuh_u.callback;
			/* Load the link first: callback may free head. */
			next = head->rcuh_next;
			SDT_PROBE2(sdt, linux, rcu, call__run,
			    head, callback);
			(*callback)(head);
			/*
			 * Can't dereference head or invoke
			 * callback after this point.
			 */
			SDT_PROBE2(sdt, linux, rcu, call__done,
			    head, callback);
		}

		/* Free the objects to kfree. */
		for (head = head_kfree; head != NULL; head = next) {
			void *obj = head->rcuh_u.obj;
			/* Load the link first: head lives inside obj. */
			next = head->rcuh_next;
			SDT_PROBE2(sdt, linux, rcu, kfree__free, head, obj);
			kfree(obj);
			/* Can't dereference head or obj after this point. */
			SDT_PROBE2(sdt, linux, rcu, kfree__done, head, obj);
		}

		/* Return to the lock. */
		mutex_enter(&gc.lock);

		/* Finished a batch of work.  Notify rcu_barrier. */
		gc.gen++;
		cv_broadcast(&gc.cv);
	}
	KASSERT(gc.first_callback == NULL);
	KASSERT(gc.first_kfree == NULL);
	mutex_exit(&gc.lock);

	kthread_exit(0);
}
282 1.1 riastrad
/*
 * init_rcu_head(head)
 *
 *	No-op; provided for Linux API compatibility.  An rcu_head
 *	here needs no setup before call_rcu/_kfree_rcu, which fill in
 *	all of its fields.
 */
void
init_rcu_head(struct rcu_head *head)
{
}
287 1.2 riastrad
/*
 * destroy_rcu_head(head)
 *
 *	No-op counterpart of init_rcu_head; provided for Linux API
 *	compatibility.
 */
void
destroy_rcu_head(struct rcu_head *head)
{
}
292 1.2 riastrad
293 1.1 riastrad int
294 1.1 riastrad linux_rcu_gc_init(void)
295 1.1 riastrad {
296 1.1 riastrad int error;
297 1.1 riastrad
298 1.1 riastrad mutex_init(&gc.lock, MUTEX_DEFAULT, IPL_VM);
299 1.1 riastrad cv_init(&gc.cv, "lnxrcugc");
300 1.1 riastrad gc.first_callback = NULL;
301 1.1 riastrad gc.first_kfree = NULL;
302 1.1 riastrad gc.gen = 0;
303 1.1 riastrad gc.dying = false;
304 1.1 riastrad
305 1.1 riastrad error = kthread_create(PRI_NONE,
306 1.1 riastrad KTHREAD_MPSAFE|KTHREAD_TS|KTHREAD_MUSTJOIN, NULL, &gc_thread, NULL,
307 1.1 riastrad &gc.lwp, "lnxrcugc");
308 1.1 riastrad if (error)
309 1.1 riastrad goto fail;
310 1.1 riastrad
311 1.1 riastrad /* Success! */
312 1.1 riastrad return 0;
313 1.1 riastrad
314 1.1 riastrad fail: cv_destroy(&gc.cv);
315 1.1 riastrad mutex_destroy(&gc.lock);
316 1.1 riastrad return error;
317 1.1 riastrad }
318 1.1 riastrad
/*
 * linux_rcu_gc_fini()
 *
 *	Stop the gc_thread kthread and destroy the RCU
 *	garbage-collector state.  Caller must ensure no further
 *	call_rcu/_kfree_rcu calls can occur.
 */
void
linux_rcu_gc_fini(void)
{

	/* Tell the thread to exit and wake it under the lock. */
	mutex_enter(&gc.lock);
	gc.dying = true;
	cv_broadcast(&gc.cv);
	mutex_exit(&gc.lock);

	/* Wait for it to drain remaining work and exit. */
	kthread_join(gc.lwp);
	gc.lwp = NULL;
	KASSERT(gc.first_callback == NULL);
	KASSERT(gc.first_kfree == NULL);
	cv_destroy(&gc.cv);
	mutex_destroy(&gc.lock);
}
335