/*	$NetBSD: subr_percpu.c,v 1.8 2008/05/03 05:31:56 yamt Exp $	*/

/*-
 * Copyright (c)2007,2008 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * per-cpu storage.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_percpu.c,v 1.8 2008/05/03 05:31:56 yamt Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/rwlock.h>
#include <sys/vmem.h>
#include <sys/xcall.h>

#include <uvm/uvm_extern.h>

static krwlock_t percpu_swap_lock;
static kmutex_t percpu_allocation_lock;
static vmem_t *percpu_offset_arena;
static unsigned int percpu_nextoff;

#define	PERCPU_QUANTUM_SIZE	(ALIGNBYTES + 1)
#define	PERCPU_QCACHE_MAX	0
#define	PERCPU_IMPORT_SIZE	2048

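/*
 * a percpu_t handle is really just an offset into each cpu's storage.
 * under DIAGNOSTIC the offset is xor'ed with a magic value, presumably so
 * that code which mistakenly dereferences the handle itself faults quickly.
 */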
#if defined(DIAGNOSTIC)
#define	MAGIC	0x50435055	/* "PCPU" */
#define	percpu_encrypt(pc)	((pc) ^ MAGIC)
#define	percpu_decrypt(pc)	((pc) ^ MAGIC)
#else /* defined(DIAGNOSTIC) */
#define	percpu_encrypt(pc)	(pc)
#define	percpu_decrypt(pc)	(pc)
#endif /* defined(DIAGNOSTIC) */

static percpu_cpu_t *
cpu_percpu(struct cpu_info *ci)
{

	return &ci->ci_data.cpu_percpu;
}

static unsigned int
percpu_offset(percpu_t *pc)
{
	const unsigned int off = percpu_decrypt((uintptr_t)pc);

	KASSERT(off < percpu_nextoff);
	return off;
}

/*
 * percpu_cpu_swap: crosscall handler for percpu_cpu_enlarge
 */

static void
percpu_cpu_swap(void *p1, void *p2)
{
	struct cpu_info * const ci = p1;
	percpu_cpu_t * const newpcc = p2;
	percpu_cpu_t * const pcc = cpu_percpu(ci);

	/*
	 * swap *pcc and *newpcc unless someone has beaten us to it.
	 */

	rw_enter(&percpu_swap_lock, RW_WRITER);
	if (newpcc->pcc_size > pcc->pcc_size) {
		percpu_cpu_t tmp;
		int s;

		tmp = *pcc;

		/*
		 * block interrupts so that we don't lose modifications made
		 * by interrupt handlers.
		 */

		s = splhigh();

		/*
		 * copy data to new storage.
		 */

		memcpy(newpcc->pcc_data, pcc->pcc_data, pcc->pcc_size);

		/*
		 * this assignment needs to be atomic for percpu_getptr_remote.
		 */

		pcc->pcc_data = newpcc->pcc_data;

		splx(s);

		pcc->pcc_size = newpcc->pcc_size;
		*newpcc = tmp;
	}
	rw_exit(&percpu_swap_lock);
}

/*
 * percpu_cpu_enlarge: ensure that the percpu_cpu_t of each cpu has enough space
 */

static void
percpu_cpu_enlarge(size_t size)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		percpu_cpu_t pcc;

		pcc.pcc_data = kmem_alloc(size, KM_SLEEP); /* XXX cacheline */
		pcc.pcc_size = size;
		if (!mp_online) {
			percpu_cpu_swap(ci, &pcc);
		} else {
			uint64_t where;

			uvm_lwp_hold(curlwp); /* don't swap out pcc */
			where = xc_unicast(0, percpu_cpu_swap, ci, &pcc, ci);
			xc_wait(where);
			uvm_lwp_rele(curlwp);
		}
		/*
		 * if this cpu already had enough space, the swap was skipped
		 * and pcc still holds the storage we just allocated, whose
		 * size is exactly "size".
		 */
		KASSERT(pcc.pcc_size <= size);
		if (pcc.pcc_data != NULL) {
			kmem_free(pcc.pcc_data, pcc.pcc_size);
		}
	}
}

/*
 * percpu_backend_alloc: vmem import callback for percpu_offset_arena
 */

static vmem_addr_t
percpu_backend_alloc(vmem_t *dummy, vmem_size_t size, vmem_size_t *resultsize,
    vm_flag_t vmflags)
{
	unsigned int offset;
	unsigned int nextoff;

	ASSERT_SLEEPABLE();
	KASSERT(dummy == NULL);

	if ((vmflags & VM_NOSLEEP) != 0)
		return VMEM_ADDR_NULL;

	size = roundup(size, PERCPU_IMPORT_SIZE);
	mutex_enter(&percpu_allocation_lock);
	offset = percpu_nextoff;
	percpu_nextoff = nextoff = percpu_nextoff + size;
	mutex_exit(&percpu_allocation_lock);

	percpu_cpu_enlarge(nextoff);

	*resultsize = size;
	return (vmem_addr_t)offset;
}

static void
percpu_zero_cb(void *vp, void *vp2, struct cpu_info *ci)
{
	size_t sz = (uintptr_t)vp2;

	memset(vp, 0, sz);
}

/*
 * percpu_zero: initialize percpu storage with zero.
 */

static void
percpu_zero(percpu_t *pc, size_t sz)
{

	percpu_foreach(pc, percpu_zero_cb, (void *)(uintptr_t)sz);
}

/*
 * percpu_init: subsystem initialization
 */

void
percpu_init(void)
{

	ASSERT_SLEEPABLE();
	rw_init(&percpu_swap_lock);
	mutex_init(&percpu_allocation_lock, MUTEX_DEFAULT, IPL_NONE);

	percpu_offset_arena = vmem_create("percpu", 0, 0, PERCPU_QUANTUM_SIZE,
	    percpu_backend_alloc, NULL, NULL, PERCPU_QCACHE_MAX, VM_SLEEP,
	    IPL_NONE);
}

/*
 * percpu_init_cpu: cpu initialization
 *
 * => should be called before the cpu appears on the list for CPU_INFO_FOREACH.
 */

void
percpu_init_cpu(struct cpu_info *ci)
{
	percpu_cpu_t * const pcc = cpu_percpu(ci);
	size_t size = percpu_nextoff; /* XXX racy */

	ASSERT_SLEEPABLE();
	pcc->pcc_size = size;
	if (size) {
		pcc->pcc_data = kmem_zalloc(pcc->pcc_size, KM_SLEEP);
	}
}

/*
 * percpu_alloc: allocate percpu storage
 *
 * => called in thread context.
 * => considered an expensive and rare operation.
 * => allocated storage is initialized with zeros.
 */

percpu_t *
percpu_alloc(size_t size)
{
	unsigned int offset;
	percpu_t *pc;

	ASSERT_SLEEPABLE();
	offset = vmem_alloc(percpu_offset_arena, size, VM_SLEEP | VM_BESTFIT);
	pc = (percpu_t *)percpu_encrypt((uintptr_t)offset);
	percpu_zero(pc, size);
	return pc;
}

/*
 * percpu_free: free percpu storage
 *
 * => called in thread context.
 * => considered an expensive and rare operation.
 */

void
percpu_free(percpu_t *pc, size_t size)
{

	ASSERT_SLEEPABLE();
	vmem_free(percpu_offset_arena, (vmem_addr_t)percpu_offset(pc), size);
}
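
/*
 * illustrative usage sketch (hypothetical consumer; foo_softc and its
 * members are not part of this file).  each cpu gets its own uint64_t
 * counter; the same size must be passed to percpu_alloc and percpu_free.
 *
 *	struct foo_softc {
 *		percpu_t *sc_count_percpu;
 *	};
 *
 *	void
 *	foo_attach(struct foo_softc *sc)
 *	{
 *
 *		sc->sc_count_percpu = percpu_alloc(sizeof(uint64_t));
 *	}
 *
 *	void
 *	foo_detach(struct foo_softc *sc)
 *	{
 *
 *		percpu_free(sc->sc_count_percpu, sizeof(uint64_t));
 *	}
 */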

/*
 * percpu_getref:
 *
 * => safe to be used in either thread or interrupt context
 * => disables preemption; must be paired with a matching percpu_putref()
 */

void *
percpu_getref(percpu_t *pc)
{

	KPREEMPT_DISABLE(curlwp);
	return percpu_getptr_remote(pc, curcpu());
}

/*
 * percpu_putref:
 *
 * => drops the preemption-disabled count after the caller is done with the
 *    per-cpu data
 */

void
percpu_putref(percpu_t *pc)
{

	KPREEMPT_ENABLE(curlwp);
}
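
/*
 * illustrative usage sketch (foo_count_this_cpu is hypothetical): bump the
 * calling cpu's private counter.  percpu_getref disables preemption, so the
 * returned pointer stays valid until the matching percpu_putref.
 *
 *	void
 *	foo_count_this_cpu(percpu_t *pc)
 *	{
 *		uint64_t *p;
 *
 *		p = percpu_getref(pc);
 *		(*p)++;
 *		percpu_putref(pc);
 *	}
 */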

/*
 * percpu_traverse_enter, percpu_traverse_exit, percpu_getptr_remote:
 * helpers to access a remote cpu's percpu data.
 *
 * => called in thread context.
 * => percpu_traverse_enter can block low-priority xcalls.
 * => typical usage would be:
 *
 *	sum = 0;
 *	percpu_traverse_enter();
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		unsigned int *p = percpu_getptr_remote(pc, ci);
 *		sum += *p;
 *	}
 *	percpu_traverse_exit();
 */

void
percpu_traverse_enter(void)
{

	ASSERT_SLEEPABLE();
	rw_enter(&percpu_swap_lock, RW_READER);
}

void
percpu_traverse_exit(void)
{

	rw_exit(&percpu_swap_lock);
}

void *
percpu_getptr_remote(percpu_t *pc, struct cpu_info *ci)
{

	return &((char *)cpu_percpu(ci)->pcc_data)[percpu_offset(pc)];
}

/*
 * percpu_foreach: call the specified callback function for each cpu.
 *
 * => called in thread context.
 * => caller should not rely on the cpu iteration order.
 * => the callback function should do minimal work, because it is executed
 *    while holding a global lock, which can block low-priority xcalls.
 *    e.g. it is illegal for a callback function to sleep for memory
 *    allocation.
 */
void
percpu_foreach(percpu_t *pc, percpu_callback_t cb, void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	percpu_traverse_enter();
	for (CPU_INFO_FOREACH(cii, ci)) {
		(*cb)(percpu_getptr_remote(pc, ci), arg, ci);
	}
	percpu_traverse_exit();
}
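
/*
 * illustrative usage sketch (foo_sum_cb and foo_sum are hypothetical):
 * sum every cpu's counter into a single total.  the callback runs while
 * percpu_foreach holds the traversal lock, so it must not sleep.
 *
 *	static void
 *	foo_sum_cb(void *p, void *arg, struct cpu_info *ci)
 *	{
 *
 *		*(uint64_t *)arg += *(uint64_t *)p;
 *	}
 *
 *	uint64_t
 *	foo_sum(percpu_t *pc)
 *	{
 *		uint64_t total = 0;
 *
 *		percpu_foreach(pc, foo_sum_cb, &total);
 *		return total;
 *	}
 */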