/*	$NetBSD: subr_percpu.c,v 1.1 2008/01/14 12:40:03 yamt Exp $	*/

/*-
 * Copyright (c)2007,2008 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * per-cpu storage.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_percpu.c,v 1.1 2008/01/14 12:40:03 yamt Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/rwlock.h>
#include <sys/vmem.h>
#include <sys/xcall.h>

#include <uvm/uvm_extern.h>

static krwlock_t	percpu_swap_lock;
static kmutex_t		percpu_allocation_lock;
static vmem_t		*percpu_offset_arena;
static unsigned int	percpu_nextoff;

#define	PERCPU_QUANTUM_SIZE	(ALIGNBYTES + 1)
#define	PERCPU_QCACHE_MAX	0
#define	PERCPU_IMPORT_SIZE	2048

static percpu_cpu_t *
cpu_percpu(struct cpu_info *ci)
{

	return &ci->ci_data.cpu_percpu;
}

static unsigned int
percpu_offset(percpu_t *pc)
{

	return (uintptr_t)pc;
}

/*
 * percpu_cpu_swap: crosscall handler for percpu_cpu_enlarge
 */

static void
percpu_cpu_swap(void *p1, void *p2)
{
	struct cpu_info * const ci = p1;
	percpu_cpu_t * const newpcc = p2;
	percpu_cpu_t * const pcc = cpu_percpu(ci);

	/*
	 * swap *pcc and *newpcc unless someone has beaten us to it.
	 */

	rw_enter(&percpu_swap_lock, RW_WRITER);
	if (newpcc->pcc_size > pcc->pcc_size) {
		percpu_cpu_t tmp;
		int s;

		tmp = *pcc;

		/*
		 * block interrupts so that we don't lose their modifications.
		 */

		s = splhigh();

		/*
		 * copy data to new storage.
		 */

		memcpy(newpcc->pcc_data, pcc->pcc_data, pcc->pcc_size);

		/*
		 * this assignment needs to be atomic for percpu_getptr_remote.
		 */

		pcc->pcc_data = newpcc->pcc_data;

		splx(s);

		pcc->pcc_size = newpcc->pcc_size;
		*newpcc = tmp;
	}
	rw_exit(&percpu_swap_lock);
}

/*
 * percpu_cpu_enlarge: ensure that each cpu's percpu_cpu_t has enough space
 */

static void
percpu_cpu_enlarge(size_t size)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		percpu_cpu_t pcc;

		pcc.pcc_data = kmem_alloc(size, KM_SLEEP); /* XXX cacheline */
		pcc.pcc_size = size;
		if (!mp_online) {
			percpu_cpu_swap(ci, &pcc);
		} else {
			uint64_t where;

			uvm_lwp_hold(curlwp); /* don't swap out pcc */
			where = xc_unicast(0, percpu_cpu_swap, ci, &pcc, ci);
			xc_wait(where);
			uvm_lwp_rele(curlwp);
		}
		KASSERT(pcc.pcc_size < size);
		if (pcc.pcc_data != NULL) {
			kmem_free(pcc.pcc_data, pcc.pcc_size);
		}
	}
}

/*
 * percpu_backend_alloc: vmem import callback for percpu_offset_arena
 */

static vmem_addr_t
percpu_backend_alloc(vmem_t *dummy, vmem_size_t size, vmem_size_t *resultsize,
    vm_flag_t vmflags)
{
	unsigned int offset;
	unsigned int nextoff;

	ASSERT_SLEEPABLE(NULL, __func__);
	KASSERT(dummy == NULL);

	if ((vmflags & VM_NOSLEEP) != 0)
		return VMEM_ADDR_NULL;

	size = roundup(size, PERCPU_IMPORT_SIZE);
	mutex_enter(&percpu_allocation_lock);
	offset = percpu_nextoff;
	percpu_nextoff = nextoff = percpu_nextoff + size;
	mutex_exit(&percpu_allocation_lock);

	percpu_cpu_enlarge(nextoff);

	*resultsize = size;
	return (vmem_addr_t)offset;
}

/*
 * percpu_init: subsystem initialization
 */

void
percpu_init(void)
{

	ASSERT_SLEEPABLE(NULL, __func__);
	rw_init(&percpu_swap_lock);
	mutex_init(&percpu_allocation_lock, MUTEX_DEFAULT, IPL_NONE);

	percpu_offset_arena = vmem_create("percpu", 0, 0, PERCPU_QUANTUM_SIZE,
	    percpu_backend_alloc, NULL, NULL, PERCPU_QCACHE_MAX, VM_SLEEP,
	    IPL_NONE);
}

/*
 * percpu_init_cpu: cpu initialization
 *
 * => should be called before the cpu appears on the list for CPU_INFO_FOREACH.
 */

void
percpu_init_cpu(struct cpu_info *ci)
{
	percpu_cpu_t * const pcc = cpu_percpu(ci);
	size_t size = percpu_nextoff; /* XXX racy */

	ASSERT_SLEEPABLE(NULL, __func__);
	pcc->pcc_size = size;
	if (size) {
		pcc->pcc_data = kmem_zalloc(pcc->pcc_size, KM_SLEEP);
	}
}

/*
 * percpu_alloc: allocate percpu storage
 *
 * => called in thread context.
 * => considered an expensive and rare operation.
 */

percpu_t *
percpu_alloc(size_t size)
{
	unsigned int offset;
	percpu_t *pc;

	ASSERT_SLEEPABLE(NULL, __func__);
	offset = vmem_alloc(percpu_offset_arena, size, VM_SLEEP | VM_BESTFIT);
	pc = (percpu_t *)(uintptr_t)offset;
	percpu_zero(pc, size);
	return pc;
}
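
/*
 * example (sketch, not part of this file): allocating storage for a
 * per-cpu counter.  the name "frob_pc" is hypothetical.  the storage
 * comes back zeroed on every cpu, since percpu_alloc calls
 * percpu_zero above.
 *
 *	static percpu_t *frob_pc;
 *
 *	frob_pc = percpu_alloc(sizeof(uint64_t));
 */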

/*
 * percpu_free: free percpu storage
 *
 * => called in thread context.
 * => considered an expensive and rare operation.
 */

void
percpu_free(percpu_t *pc, size_t size)
{

	ASSERT_SLEEPABLE(NULL, __func__);
	vmem_free(percpu_offset_arena, (vmem_addr_t)percpu_offset(pc), size);
}

/*
 * percpu_getptr:
 *
 * => called with preemption disabled
 * => safe to be used in either thread or interrupt context
 */

void *
percpu_getptr(percpu_t *pc)
{

	return percpu_getptr_remote(pc, curcpu());
}
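
/*
 * example (sketch): bumping the hypothetical per-cpu counter from
 * thread context.  preemption must be off across the access so the
 * lwp cannot migrate between fetching the pointer and using it; the
 * kpreempt_disable/kpreempt_enable pair below is an assumption about
 * how the caller achieves that.
 *
 *	uint64_t *p;
 *
 *	kpreempt_disable();
 *	p = percpu_getptr(frob_pc);
 *	(*p)++;
 *	kpreempt_enable();
 */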

/*
 * percpu_traverse_enter, percpu_traverse_exit, percpu_getptr_remote:
 * helpers to access a remote cpu's percpu data.
 *
 * => called in thread context.
 * => typical usage would be:
 *
 *	sum = 0;
 *	percpu_traverse_enter();
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		unsigned int *p = percpu_getptr_remote(pc, ci);
 *		sum += *p;
 *	}
 *	percpu_traverse_exit();
 */

void
percpu_traverse_enter(void)
{

	ASSERT_SLEEPABLE(NULL, __func__);
	rw_enter(&percpu_swap_lock, RW_READER);
}

void
percpu_traverse_exit(void)
{

	rw_exit(&percpu_swap_lock);
}

void *
percpu_getptr_remote(percpu_t *pc, struct cpu_info *ci)
{

	return &((char *)cpu_percpu(ci)->pcc_data)[percpu_offset(pc)];
}


static void
percpu_zero_cb(void *vp, void *vp2)
{
	size_t sz = (uintptr_t)vp2;

	memset(vp, 0, sz);
}

/*
 * percpu_zero: initialize percpu storage with zero.
 */
void
percpu_zero(percpu_t *pc, size_t sz)
{

	percpu_foreach(pc, percpu_zero_cb, (void *)(uintptr_t)sz);
}

/*
 * percpu_foreach: call the specified callback function for each cpu.
 *
 * => caller should not rely on the cpu iteration order.
 */
void
percpu_foreach(percpu_t *pc, percpu_callback_t cb, void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	percpu_traverse_enter();
	for (CPU_INFO_FOREACH(cii, ci)) {
		void *p = percpu_getptr_remote(pc, ci);
		(*cb)(p, arg);
	}
	percpu_traverse_exit();
}
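
/*
 * example (sketch): summing the hypothetical per-cpu counter with a
 * percpu_foreach callback instead of iterating cpus by hand as in the
 * percpu_traverse_enter example above.  "frob_sum_cb" is made up for
 * illustration; it matches the two-argument callback shape used by
 * percpu_zero_cb.
 *
 *	static void
 *	frob_sum_cb(void *vp, void *vp2)
 *	{
 *		uint64_t *sump = vp2;
 *
 *		*sump += *(uint64_t *)vp;
 *	}
 *
 *	uint64_t sum = 0;
 *
 *	percpu_foreach(frob_pc, frob_sum_cb, &sum);
 */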