/*	$NetBSD: subr_percpu.c,v 1.21 2020/02/01 12:49:02 riastradh Exp $	*/

/*-
 * Copyright (c)2007,2008 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * per-cpu storage.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_percpu.c,v 1.21 2020/02/01 12:49:02 riastradh Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/rwlock.h>
#include <sys/vmem.h>
#include <sys/xcall.h>

#define	PERCPU_QUANTUM_SIZE	(ALIGNBYTES + 1)
#define	PERCPU_QCACHE_MAX	0
#define	PERCPU_IMPORT_SIZE	2048

struct percpu {
	unsigned		pc_offset;	/* offset into per-cpu storage */
	size_t			pc_size;	/* size of the per-cpu region */
	percpu_callback_t	pc_dtor;	/* dtor run by percpu_free */
	void			*pc_cookie;	/* cookie passed to ctor/dtor */
};

static krwlock_t	percpu_swap_lock	__cacheline_aligned;
static kmutex_t		percpu_allocation_lock	__cacheline_aligned;
static vmem_t *		percpu_offset_arena	__cacheline_aligned;
static unsigned int	percpu_nextoff		__cacheline_aligned;

static percpu_cpu_t *
cpu_percpu(struct cpu_info *ci)
{

	return &ci->ci_data.cpu_percpu;
}

static unsigned int
percpu_offset(percpu_t *pc)
{
	const unsigned int off = pc->pc_offset;

	KASSERT(off < percpu_nextoff);
	return off;
}

/*
 * percpu_cpu_swap: cross-call handler for percpu_cpu_enlarge
 */
__noubsan
static void
percpu_cpu_swap(void *p1, void *p2)
{
	struct cpu_info * const ci = p1;
	percpu_cpu_t * const newpcc = p2;
	percpu_cpu_t * const pcc = cpu_percpu(ci);

	KASSERT(ci == curcpu() || !mp_online);

	/*
	 * swap *pcc and *newpcc unless someone has beaten us to it.
	 */
	rw_enter(&percpu_swap_lock, RW_WRITER);
	if (newpcc->pcc_size > pcc->pcc_size) {
		percpu_cpu_t tmp;
		int s;

		tmp = *pcc;

		/*
		 * block interrupts so that we don't lose their modifications.
		 */

		s = splhigh();

		/*
		 * copy data to the new storage.
		 */

		memcpy(newpcc->pcc_data, pcc->pcc_data, pcc->pcc_size);

		/*
		 * this assignment needs to be atomic for percpu_getptr_remote.
		 */

		pcc->pcc_data = newpcc->pcc_data;

		splx(s);

		pcc->pcc_size = newpcc->pcc_size;
		*newpcc = tmp;
	}
	rw_exit(&percpu_swap_lock);
}

/*
 * percpu_cpu_enlarge: ensure that each cpu's percpu_cpu_t has enough space
 */

static void
percpu_cpu_enlarge(size_t size)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		percpu_cpu_t pcc;

		pcc.pcc_data = kmem_alloc(size, KM_SLEEP); /* XXX cacheline */
		pcc.pcc_size = size;
		if (!mp_online) {
			percpu_cpu_swap(ci, &pcc);
		} else {
			uint64_t where;

			where = xc_unicast(0, percpu_cpu_swap, ci, &pcc, ci);
			xc_wait(where);
		}
		KASSERT(pcc.pcc_size <= size);
		if (pcc.pcc_data != NULL) {
			kmem_free(pcc.pcc_data, pcc.pcc_size);
		}
	}
}

/*
 * percpu_backend_alloc: vmem import callback for percpu_offset_arena
 */

static int
percpu_backend_alloc(vmem_t *dummy, vmem_size_t size, vmem_size_t *resultsize,
    vm_flag_t vmflags, vmem_addr_t *addrp)
{
	unsigned int offset;
	unsigned int nextoff;

	ASSERT_SLEEPABLE();
	KASSERT(dummy == NULL);

	if ((vmflags & VM_NOSLEEP) != 0)
		return ENOMEM;

	size = roundup(size, PERCPU_IMPORT_SIZE);
	mutex_enter(&percpu_allocation_lock);
	offset = percpu_nextoff;
	percpu_nextoff = nextoff = percpu_nextoff + size;
	mutex_exit(&percpu_allocation_lock);

	percpu_cpu_enlarge(nextoff);

	*resultsize = size;
	*addrp = (vmem_addr_t)offset;
	return 0;
}

static void
percpu_zero_cb(void *vp, void *vp2, struct cpu_info *ci)
{
	size_t sz = (uintptr_t)vp2;

	memset(vp, 0, sz);
}

/*
 * percpu_zero: initialize percpu storage with zeros.
 */

static void
percpu_zero(percpu_t *pc, size_t sz)
{

	percpu_foreach(pc, percpu_zero_cb, (void *)(uintptr_t)sz);
}

/*
 * percpu_init: subsystem initialization
 */

void
percpu_init(void)
{

	ASSERT_SLEEPABLE();
	rw_init(&percpu_swap_lock);
	mutex_init(&percpu_allocation_lock, MUTEX_DEFAULT, IPL_NONE);
	percpu_nextoff = PERCPU_QUANTUM_SIZE;

	percpu_offset_arena = vmem_xcreate("percpu", 0, 0, PERCPU_QUANTUM_SIZE,
	    percpu_backend_alloc, NULL, NULL, PERCPU_QCACHE_MAX, VM_SLEEP,
	    IPL_NONE);
}

/*
 * percpu_init_cpu: cpu initialization
 *
 * => should be called before the cpu appears on the list for CPU_INFO_FOREACH.
 */

void
percpu_init_cpu(struct cpu_info *ci)
{
	percpu_cpu_t * const pcc = cpu_percpu(ci);
	size_t size = percpu_nextoff; /* XXX racy */

	ASSERT_SLEEPABLE();
	pcc->pcc_size = size;
	if (size) {
		pcc->pcc_data = kmem_zalloc(pcc->pcc_size, KM_SLEEP);
	}
}

/*
 * percpu_alloc: allocate percpu storage
 *
 * => called in thread context.
 * => considered an expensive and rare operation.
 * => allocated storage is initialized with zeros.
 */

percpu_t *
percpu_alloc(size_t size)
{

	return percpu_create(size, NULL, NULL, NULL);
}
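
/*
 * Example (illustrative sketch, not part of this file): a simple
 * per-cpu counter.  The name "foo_count" is invented for the example.
 *
 *	percpu_t *foo_count;
 *
 *	foo_count = percpu_alloc(sizeof(uint64_t));
 *	...
 *	percpu_free(foo_count, sizeof(uint64_t));
 */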

/*
 * percpu_create: allocate percpu storage and associate ctor/dtor with it
 *
 * => called in thread context.
 * => considered an expensive and rare operation.
 * => allocated storage is initialized by the ctor, or with zeros if
 *    ctor is NULL
 * => percpu_free will call the dtor first, if dtor is non-NULL
 * => ctor or dtor may sleep, even on allocation
 */

percpu_t *
percpu_create(size_t size, percpu_callback_t ctor, percpu_callback_t dtor,
    void *cookie)
{
	vmem_addr_t offset;
	percpu_t *pc;

	ASSERT_SLEEPABLE();
	(void)vmem_alloc(percpu_offset_arena, size, VM_SLEEP | VM_BESTFIT,
	    &offset);

	pc = kmem_alloc(sizeof(*pc), KM_SLEEP);
	pc->pc_offset = offset;
	pc->pc_size = size;
	pc->pc_dtor = dtor;
	pc->pc_cookie = cookie;

	if (ctor) {
		CPU_INFO_ITERATOR cii;
		struct cpu_info *ci;
		void *buf;

		buf = kmem_alloc(size, KM_SLEEP);
		for (CPU_INFO_FOREACH(cii, ci)) {
			memset(buf, 0, size);
			(*ctor)(buf, cookie, ci);
			percpu_traverse_enter();
			memcpy(percpu_getptr_remote(pc, ci), buf, size);
			percpu_traverse_exit();
		}
		explicit_memset(buf, 0, size);
		kmem_free(buf, size);
	} else {
		percpu_zero(pc, size);
	}

	return pc;
}
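
/*
 * Example (illustrative sketch, not part of this file): percpu_create
 * with a ctor/dtor pair, e.g. to give each cpu a private mutex.  The
 * "foo_" names are invented for the example.
 *
 *	percpu_t *foo_locks;
 *
 *	static void
 *	foo_lock_init(void *ptr, void *cookie, struct cpu_info *ci)
 *	{
 *
 *		mutex_init(ptr, MUTEX_DEFAULT, IPL_NONE);
 *	}
 *
 *	static void
 *	foo_lock_fini(void *ptr, void *cookie, struct cpu_info *ci)
 *	{
 *
 *		mutex_destroy(ptr);
 *	}
 *
 *	foo_locks = percpu_create(sizeof(kmutex_t),
 *	    foo_lock_init, foo_lock_fini, NULL);
 */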

/*
 * percpu_free: free percpu storage
 *
 * => called in thread context.
 * => considered an expensive and rare operation.
 */

void
percpu_free(percpu_t *pc, size_t size)
{

	ASSERT_SLEEPABLE();
	KASSERT(size == pc->pc_size);

	if (pc->pc_dtor) {
		CPU_INFO_ITERATOR cii;
		struct cpu_info *ci;
		void *buf;

		buf = kmem_alloc(size, KM_SLEEP);
		for (CPU_INFO_FOREACH(cii, ci)) {
			percpu_traverse_enter();
			memcpy(buf, percpu_getptr_remote(pc, ci), size);
			explicit_memset(percpu_getptr_remote(pc, ci), 0, size);
			percpu_traverse_exit();
			(*pc->pc_dtor)(buf, pc->pc_cookie, ci);
		}
		explicit_memset(buf, 0, size);
		kmem_free(buf, size);
	}

	vmem_free(percpu_offset_arena, (vmem_addr_t)percpu_offset(pc), size);
	kmem_free(pc, sizeof(*pc));
}

/*
 * percpu_getref:
 *
 * => safe to be used in either thread or interrupt context
 * => disables preemption; must be bracketed with a percpu_putref()
 */

void *
percpu_getref(percpu_t *pc)
{

	kpreempt_disable();
	return percpu_getptr_remote(pc, curcpu());
}

/*
 * percpu_putref:
 *
 * => drops the preemption-disabled count after the caller is done with
 *    per-cpu data
 */

void
percpu_putref(percpu_t *pc)
{

	kpreempt_enable();
}
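
/*
 * Example (illustrative sketch, not part of this file): bracketed
 * access to the current cpu's instance of the invented "foo_count"
 * from the percpu_alloc example above.
 *
 *	uint64_t *p;
 *
 *	p = percpu_getref(foo_count);
 *	(*p)++;
 *	percpu_putref(foo_count);
 */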

/*
 * percpu_traverse_enter, percpu_traverse_exit, percpu_getptr_remote:
 * helpers to access a remote cpu's percpu data.
 *
 * => called in thread context.
 * => percpu_traverse_enter can block low-priority xcalls.
 * => typical usage would be:
 *
 *	sum = 0;
 *	percpu_traverse_enter();
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		unsigned int *p = percpu_getptr_remote(pc, ci);
 *		sum += *p;
 *	}
 *	percpu_traverse_exit();
 */

void
percpu_traverse_enter(void)
{

	ASSERT_SLEEPABLE();
	rw_enter(&percpu_swap_lock, RW_READER);
}

void
percpu_traverse_exit(void)
{

	rw_exit(&percpu_swap_lock);
}

void *
percpu_getptr_remote(percpu_t *pc, struct cpu_info *ci)
{

	return &((char *)cpu_percpu(ci)->pcc_data)[percpu_offset(pc)];
}

/*
 * percpu_foreach: call the specified callback function for each cpu.
 *
 * => called in thread context.
 * => caller should not rely on the cpu iteration order.
 * => the callback function should be minimal because it is executed while
 *    holding a global lock, which can block low-priority xcalls.
 *    e.g. it's illegal for a callback function to sleep for memory allocation.
 */
void
percpu_foreach(percpu_t *pc, percpu_callback_t cb, void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	percpu_traverse_enter();
	for (CPU_INFO_FOREACH(cii, ci)) {
		(*cb)(percpu_getptr_remote(pc, ci), arg, ci);
	}
	percpu_traverse_exit();
}
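
/*
 * Example (illustrative sketch, not part of this file): a percpu_foreach
 * callback that totals every cpu's instance of the invented "foo_count".
 * The callback runs with percpu_swap_lock held and must not sleep.
 *
 *	static void
 *	foo_sum_cb(void *ptr, void *arg, struct cpu_info *ci)
 *	{
 *		uint64_t *cnt = ptr, *sum = arg;
 *
 *		*sum += *cnt;
 *	}
 *
 *	uint64_t sum = 0;
 *	percpu_foreach(foo_count, foo_sum_cb, &sum);
 */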