/*	$NetBSD: kern_cpu.c,v 1.51 2011/09/11 14:54:49 jdc Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.51 2011/09/11 14:54:49 jdc Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>

#include <uvm/uvm_extern.h>

/*
 * If the port has stated that cpu_data is the first thing in cpu_info,
 * verify that claim is true.  This will prevent them from getting out
 * of sync.
 */
#ifdef __HAVE_CPU_DATA_FIRST
CTASSERT(offsetof(struct cpu_info, ci_data) == 0);
#else
CTASSERT(offsetof(struct cpu_info, ci_data) != 0);
#endif
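
/*
 * Illustrative sketch (editor's note, not original code): the zero-offset
 * guarantee asserted above is what lets __HAVE_CPU_DATA_FIRST ports convert
 * between the two views with a plain cast:
 *
 *	struct cpu_data *cd = (struct cpu_data *)curcpu();
 */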

void	cpuctlattach(int);

static void	cpu_xc_online(struct cpu_info *);
static void	cpu_xc_offline(struct cpu_info *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
	nullopen, nullclose, nullread, nullwrite, cpuctl_ioctl,
	nullstop, notty, nopoll, nommap, nokqfilter,
	D_OTHER | D_MPSAFE
};

kmutex_t	cpu_lock		__cacheline_aligned;
int		ncpu			__read_mostly;
int		ncpuonline		__read_mostly;
bool		mp_online		__read_mostly;

kcpuset_t *	kcpuset_attached	__read_mostly;

struct cpuqueue	cpu_queue		__cacheline_aligned
    = CIRCLEQ_HEAD_INITIALIZER(cpu_queue);

static struct cpu_info **cpu_infos	__read_mostly;

int
mi_cpu_attach(struct cpu_info *ci)
{
	int error;

	KASSERT(maxcpus > 0);

	ci->ci_index = ncpu;
	CIRCLEQ_INSERT_TAIL(&cpu_queue, ci, ci_data.cpu_qchain);
	TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
	__cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

	/* This is useful for, e.g., per-CPU evcnt(9) counters. */
	snprintf(ci->ci_data.cpu_name, sizeof(ci->ci_data.cpu_name), "cpu%d",
	    cpu_index(ci));
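
	/*
	 * Sketch (editor's illustration; "ci_ev_example" is a hypothetical
	 * MD-defined counter): MD code can hang a dynamically attached
	 * event counter off this name, e.g.
	 *
	 *	evcnt_attach_dynamic(&ci->ci_ev_example, EVCNT_TYPE_MISC,
	 *	    NULL, ci->ci_data.cpu_name, "example");
	 */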

	if (__predict_false(cpu_infos == NULL)) {
		cpu_infos =
		    kmem_zalloc(sizeof(cpu_infos[0]) * maxcpus, KM_SLEEP);
		kcpuset_create(&kcpuset_attached, true);
	}
	cpu_infos[cpu_index(ci)] = ci;
	kcpuset_set(kcpuset_attached, ci->ci_index);

	sched_cpuattach(ci);

	error = create_idle_lwp(ci);
	if (error != 0) {
		/* XXX revert sched_cpuattach */
		return error;
	}

	if (ci == curcpu())
		ci->ci_data.cpu_onproc = curlwp;
	else
		ci->ci_data.cpu_onproc = ci->ci_data.cpu_idlelwp;

	percpu_init_cpu(ci);
	softint_init(ci);
	callout_init_cpu(ci);
	xc_init_cpu(ci);
	pool_cache_cpu_init(ci);
	selsysinit(ci);
	cache_cpu_init(ci);
	TAILQ_INIT(&ci->ci_data.cpu_biodone);
	ncpu++;
	ncpuonline++;

	return 0;
}

void
cpuctlattach(int dummy)
{

	KASSERT(cpu_infos != NULL);
}

int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	CPU_INFO_ITERATOR cii;
	cpustate_t *cs;
	struct cpu_info *ci;
	int error, i;
	u_int id;

	error = 0;

	mutex_enter(&cpu_lock);
	switch (cmd) {
	case IOC_CPU_SETSTATE:
		cs = data;
		error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
		    NULL);
		if (error != 0)
			break;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(cs->cs_id)) == NULL) {
			error = ESRCH;
			break;
		}
		error = cpu_setintr(ci, cs->cs_intr);
		if (error != 0)
			break;
		error = cpu_setstate(ci, cs->cs_online);
		break;

	case IOC_CPU_GETSTATE:
		cs = data;
		id = cs->cs_id;
		memset(cs, 0, sizeof(*cs));
		cs->cs_id = id;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(id)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
			cs->cs_intr = false;
		else
			cs->cs_intr = true;
		cs->cs_lastmod = (int32_t)ci->ci_schedstate.spc_lastmod;
		cs->cs_lastmodhi = (int32_t)
		    (ci->ci_schedstate.spc_lastmod >> 32);
		cs->cs_intrcnt = cpu_intr_count(ci) + 1;
		cs->cs_hwid = ci->ci_cpuid;
		break;

	case IOC_CPU_MAPID:
		i = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)
				break;
		}
		if (ci == NULL)
			error = ESRCH;
		else
			*(int *)data = cpu_index(ci);
		break;

	case IOC_CPU_GETCOUNT:
		*(int *)data = ncpu;
		break;

	default:
		error = ENOTTY;
		break;
	}
	mutex_exit(&cpu_lock);

	return error;
}
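
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * userland drives the ioctl protocol above through /dev/cpuctl, much as
 * cpuctl(8) does.  Error handling is elided and the variable names are
 * the editor's own.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/cpuio.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/dev/cpuctl", O_RDWR);
 *	int n;
 *	ioctl(fd, IOC_CPU_GETCOUNT, &n);	(number of attached CPUs)
 *
 *	cpustate_t cs;
 *	cs.cs_id = 0;
 *	ioctl(fd, IOC_CPU_GETSTATE, &cs);	(fills cs_online, cs_intr, ...)
 *
 *	cs.cs_online = false;
 *	ioctl(fd, IOC_CPU_SETSTATE, &cs);	(offline cpu0; may fail EBUSY)
 */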

struct cpu_info *
cpu_lookup(u_int idx)
{
	struct cpu_info *ci;

	KASSERT(idx < maxcpus);

	if (__predict_false(cpu_infos == NULL)) {
		KASSERT(idx == 0);
		return curcpu();
	}

	ci = cpu_infos[idx];
	KASSERT(ci == NULL || cpu_index(ci) == idx);

	return ci;
}

static void
cpu_xc_offline(struct cpu_info *ci)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *target_ci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	/*
	 * Thread that made the cross call (separate context) holds
	 * cpu_lock on our behalf.
	 */
	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first available CPU for the migration. */
	for (CPU_INFO_FOREACH(cii, target_ci)) {
		mspc = &target_ci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(target_ci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.  Note that this
	 * runs from the xcall thread, thus handling of LSONPROC is not needed.
	 */
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		struct cpu_info *mci;

		lwp_lock(l);
		if (l->l_cpu != ci || (l->l_pflag & (LP_BOUND | LP_INTR))) {
			lwp_unlock(l);
			continue;
		}
		/* Regular case - no affinity. */
		if (l->l_affinity == NULL) {
			lwp_migrate(l, target_ci);
			continue;
		}
		/* Affinity is set, find an online CPU in the set. */
		for (CPU_INFO_FOREACH(cii, mci)) {
			mspc = &mci->ci_schedstate;
			if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
			    kcpuset_isset(l->l_affinity, cpu_index(mci)))
				break;
		}
		if (mci == NULL) {
			lwp_unlock(l);
			mutex_exit(proc_lock);
			goto fail;
		}
		lwp_migrate(l, mci);
	}
	mutex_exit(proc_lock);

#ifdef __HAVE_MD_CPU_OFFLINE
	cpu_offline_md();
#endif
	return;
fail:
	/* Just unset the SPCF_OFFLINE flag, caller will check */
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

static void
cpu_xc_online(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

int
cpu_setstate(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nonline;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (online) {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_online;
		ncpuonline++;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) != 0)
			return 0;
		nonline = 0;
		/*
		 * Ensure that at least one CPU within the processor set
		 * stays online.  Revisit this later.
		 */
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
				continue;
			if (ci2->ci_schedstate.spc_psid != spc->spc_psid)
				continue;
			nonline++;
		}
		if (nonline == 1)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_offline;
		ncpuonline--;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
	} else if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
		/* If it was not marked offline, the attempt failed: busy. */
		return EBUSY;
	}

	spc->spc_lastmod = time_second;
	return 0;
}
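
/*
 * Usage sketch (editor's illustration): cpu_setstate() asserts that the
 * caller holds cpu_lock, so a kernel caller would do, e.g.
 *
 *	mutex_enter(&cpu_lock);
 *	error = cpu_setstate(ci, false);	(take "ci" offline)
 *	mutex_exit(&cpu_lock);
 */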

#ifdef __HAVE_INTR_CONTROL
static void
cpu_xc_intr(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_NOINTR;
	splx(s);
}

static void
cpu_xc_nointr(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_NOINTR;
	splx(s);
}

int
cpu_setintr(struct cpu_info *ci, bool intr)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nintr;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (intr) {
		if ((spc->spc_flags & SPCF_NOINTR) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_intr;
	} else {
		if ((spc->spc_flags & SPCF_NOINTR) != 0)
			return 0;
		/*
		 * Ensure that at least one CPU within the system
		 * is handling device interrupts.
		 */
		nintr = 0;
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
				continue;
			if (ci2 == ci)
				continue;
			nintr++;
		}
		if (nintr == 0)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_nointr;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (intr) {
		KASSERT((spc->spc_flags & SPCF_NOINTR) == 0);
	} else if ((spc->spc_flags & SPCF_NOINTR) == 0) {
		/* If SPCF_NOINTR was not set, the attempt failed: busy. */
		return EBUSY;
	}

	/* Direct interrupts away from the CPU and record the change. */
	cpu_intr_redistribute();
	spc->spc_lastmod = time_second;
	return 0;
}
#else	/* __HAVE_INTR_CONTROL */
int
cpu_setintr(struct cpu_info *ci, bool intr)
{

	return EOPNOTSUPP;
}

u_int
cpu_intr_count(struct cpu_info *ci)
{

	return 0;	/* 0 == "don't know" */
}
#endif	/* __HAVE_INTR_CONTROL */
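
/*
 * Usage sketch (editor's illustration): as with cpu_setstate(), callers of
 * cpu_setintr() hold cpu_lock, and must be prepared for EOPNOTSUPP on ports
 * built without __HAVE_INTR_CONTROL:
 *
 *	mutex_enter(&cpu_lock);
 *	error = cpu_setintr(ci, false);	(shield "ci" from device interrupts)
 *	mutex_exit(&cpu_lock);
 */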

bool
cpu_softintr_p(void)
{

	return (curlwp->l_pflag & LP_INTR) != 0;
}