/*	$NetBSD: kern_cpu.c,v 1.46 2011/05/13 22:16:43 rmind Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.46 2011/05/13 22:16:43 rmind Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>

#include <uvm/uvm_extern.h>

/*
 * If the port has stated that cpu_data is the first thing in cpu_info,
 * verify that claim is true.  This will prevent them from getting out
 * of sync.
 */
#ifdef __HAVE_CPU_DATA_FIRST
CTASSERT(offsetof(struct cpu_info, ci_data) == 0);
#else
CTASSERT(offsetof(struct cpu_info, ci_data) != 0);
#endif

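/*
 * For illustration only (not from any particular port): a port that
 * defines __HAVE_CPU_DATA_FIRST promises a layout like the sketch
 * below, so MI code may convert between the two structures with a
 * simple cast.  The members after ci_data are hypothetical.
 *
 *	struct cpu_info {
 *		struct cpu_data ci_data;	// must be first
 *		device_t ci_dev;		// MD members follow
 *		...
 *	};
 */
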
void	cpuctlattach(int);

static void	cpu_xc_online(struct cpu_info *);
static void	cpu_xc_offline(struct cpu_info *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
	nullopen, nullclose, nullread, nullwrite, cpuctl_ioctl,
	nullstop, notty, nopoll, nommap, nokqfilter,
	D_OTHER | D_MPSAFE
};

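/*
 * Userland reaches cpuctl_ioctl() through /dev/cpuctl; this is what
 * cpuctl(8) does.  A minimal sketch of such a caller (illustrative
 * only, error handling abbreviated):
 *
 *	int fd = open("/dev/cpuctl", O_RDWR);
 *	cpustate_t cs = { .cs_id = 1, .cs_online = false, .cs_intr = true };
 *	if (fd == -1 || ioctl(fd, IOC_CPU_SETSTATE, &cs) == -1)
 *		err(EXIT_FAILURE, "IOC_CPU_SETSTATE");
 */
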
kmutex_t	cpu_lock		__cacheline_aligned;
int		ncpu			__read_mostly;
int		ncpuonline		__read_mostly;
bool		mp_online		__read_mostly;
struct cpuqueue	cpu_queue		__cacheline_aligned
    = CIRCLEQ_HEAD_INITIALIZER(cpu_queue);

static struct cpu_info **cpu_infos	__read_mostly;

int
mi_cpu_attach(struct cpu_info *ci)
{
	int error;

	KASSERT(maxcpus > 0);

	ci->ci_index = ncpu;
	CIRCLEQ_INSERT_TAIL(&cpu_queue, ci, ci_data.cpu_qchain);
	TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
	__cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

	/* This is useful for, e.g., per-cpu evcnt */
	snprintf(ci->ci_data.cpu_name, sizeof(ci->ci_data.cpu_name), "cpu%d",
	    cpu_index(ci));

	sched_cpuattach(ci);

	error = create_idle_lwp(ci);
	if (error != 0) {
		/* XXX revert sched_cpuattach */
		return error;
	}

	if (ci == curcpu())
		ci->ci_data.cpu_onproc = curlwp;
	else
		ci->ci_data.cpu_onproc = ci->ci_data.cpu_idlelwp;

	percpu_init_cpu(ci);
	softint_init(ci);
	callout_init_cpu(ci);
	xc_init_cpu(ci);
	pool_cache_cpu_init(ci);
	selsysinit(ci);
	cache_cpu_init(ci);
	TAILQ_INIT(&ci->ci_data.cpu_biodone);
	ncpu++;
	ncpuonline++;

	if (cpu_infos == NULL) {
		cpu_infos =
		    kmem_zalloc(sizeof(cpu_infos[0]) * maxcpus, KM_SLEEP);
	}
	cpu_infos[cpu_index(ci)] = ci;

	return 0;
}

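/*
 * Illustrative sketch (assumed, not from this file): a port's MD
 * autoconf attachment typically fills in the MD parts of cpu_info and
 * then hands the CPU to MI code via mi_cpu_attach():
 *
 *	void
 *	cpu_attach(device_t parent, device_t self, void *aux)
 *	{
 *		struct cpu_info *ci = ...;	// MD lookup/allocation
 *
 *		...MD initialization...
 *		if (mi_cpu_attach(ci) != 0)
 *			aprint_error_dev(self, "mi_cpu_attach failed\n");
 *	}
 */
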
void
cpuctlattach(int dummy)
{

	KASSERT(cpu_infos != NULL);
}

int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	CPU_INFO_ITERATOR cii;
	cpustate_t *cs;
	struct cpu_info *ci;
	int error, i;
	u_int id;

	error = 0;

	mutex_enter(&cpu_lock);
	switch (cmd) {
	case IOC_CPU_SETSTATE:
		cs = data;
		error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
		    NULL);
		if (error != 0)
			break;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(cs->cs_id)) == NULL) {
			error = ESRCH;
			break;
		}
		/*
		 * A cpu_setintr() error is deliberately not fatal here:
		 * ports without __HAVE_INTR_CONTROL always return
		 * EOPNOTSUPP, which must not prevent cpu_setstate().
		 */
		(void)cpu_setintr(ci, cs->cs_intr);
		error = cpu_setstate(ci, cs->cs_online);
		break;

	case IOC_CPU_GETSTATE:
		cs = data;
		id = cs->cs_id;
		memset(cs, 0, sizeof(*cs));
		cs->cs_id = id;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(id)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
			cs->cs_intr = false;
		else
			cs->cs_intr = true;
		cs->cs_lastmod = (int32_t)ci->ci_schedstate.spc_lastmod;
		cs->cs_lastmodhi = (int32_t)
		    (ci->ci_schedstate.spc_lastmod >> 32);
		cs->cs_intrcnt = cpu_intr_count(ci) + 1;
		break;

	case IOC_CPU_MAPID:
		i = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)
				break;
		}
		if (ci == NULL)
			error = ESRCH;
		else
			*(int *)data = cpu_index(ci);
		break;

	case IOC_CPU_GETCOUNT:
		*(int *)data = ncpu;
		break;

	default:
		error = ENOTTY;
		break;
	}
	mutex_exit(&cpu_lock);

	return error;
}

struct cpu_info *
cpu_lookup(u_int idx)
{
	struct cpu_info *ci;

	KASSERT(idx < maxcpus);

	if (__predict_false(cpu_infos == NULL)) {
		KASSERT(idx == 0);
		return curcpu();
	}

	ci = cpu_infos[idx];
	KASSERT(ci == NULL || cpu_index(ci) == idx);

	return ci;
}

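/*
 * Illustrative sketch: MI code that wants to visit every CPU normally
 * walks the list with CPU_INFO_FOREACH() rather than indexing by hand;
 * cpu_lookup() is for mapping a stable index (e.g. one handed in via
 * cpuctl_ioctl() above) back to its cpu_info:
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci))
 *		printf("%s\n", ci->ci_data.cpu_name);
 */
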
static void
cpu_xc_offline(struct cpu_info *ci)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *target_ci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	/*
	 * Thread that made the cross call (separate context) holds
	 * cpu_lock on our behalf.
	 */
	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first available CPU for the migration. */
	for (CPU_INFO_FOREACH(cii, target_ci)) {
		mspc = &target_ci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(target_ci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.  Note that this
	 * runs from the xcall thread, thus handling of LSONPROC is not needed.
	 */
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		struct cpu_info *mci;

		lwp_lock(l);
		if (l->l_cpu != ci || (l->l_pflag & (LP_BOUND | LP_INTR))) {
			lwp_unlock(l);
			continue;
		}
		/* Normal case - no affinity */
		if ((l->l_flag & LW_AFFINITY) == 0) {
			lwp_migrate(l, target_ci);
			continue;
		}
		/* Affinity is set, find an online CPU in the set */
		KASSERT(l->l_affinity != NULL);
		for (CPU_INFO_FOREACH(cii, mci)) {
			mspc = &mci->ci_schedstate;
			if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
			    kcpuset_isset(cpu_index(mci), l->l_affinity))
				break;
		}
		if (mci == NULL) {
			lwp_unlock(l);
			mutex_exit(proc_lock);
			goto fail;
		}
		lwp_migrate(l, mci);
	}
	mutex_exit(proc_lock);

#ifdef __HAVE_MD_CPU_OFFLINE
	cpu_offline_md();
#endif
	return;
fail:
	/* Just unset the SPCF_OFFLINE flag, caller will check */
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

static void
cpu_xc_online(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}
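
/*
 * A minimal sketch of the cross-call pattern that runs the two
 * handlers above on the target CPU (this is the generic xcall(9)
 * interface; func and the args shown are hypothetical):
 *
 *	uint64_t where = xc_unicast(0, (xcfunc_t)func, arg1, arg2, ci);
 *	xc_wait(where);		// returns once func has run on ci
 */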

int
cpu_setstate(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nonline;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (online) {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_online;
		ncpuonline++;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) != 0)
			return 0;
		nonline = 0;
		/*
		 * Ensure that at least one CPU within the processor set
		 * stays online.  Revisit this later.
		 */
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
				continue;
			if (ci2->ci_schedstate.spc_psid != spc->spc_psid)
				continue;
			nonline++;
		}
		if (nonline == 1)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_offline;
		ncpuonline--;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
	} else if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
		/* If it was not set offline, then it is busy */
		return EBUSY;
	}

	spc->spc_lastmod = time_second;
	return 0;
}

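/*
 * Sketch of a kernel-side caller (illustrative; it mirrors what
 * cpuctl_ioctl() above does).  cpu_setstate() asserts that cpu_lock
 * is held, so a caller must take it first:
 *
 *	mutex_enter(&cpu_lock);
 *	error = cpu_setstate(ci, false);  // EBUSY if last online CPU
 *	mutex_exit(&cpu_lock);
 */
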
#ifdef __HAVE_INTR_CONTROL
static void
cpu_xc_intr(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_NOINTR;
	splx(s);
}

static void
cpu_xc_nointr(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_NOINTR;
	splx(s);
}

int
cpu_setintr(struct cpu_info *ci, bool intr)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nintr;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (intr) {
		if ((spc->spc_flags & SPCF_NOINTR) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_intr;
	} else {
		if ((spc->spc_flags & SPCF_NOINTR) != 0)
			return 0;
		/*
		 * Ensure that at least one CPU within the system
		 * is handling device interrupts.
		 */
		nintr = 0;
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
				continue;
			if (ci2 == ci)
				continue;
			nintr++;
		}
		if (nintr == 0)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_nointr;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (intr) {
		KASSERT((spc->spc_flags & SPCF_NOINTR) == 0);
	} else if ((spc->spc_flags & SPCF_NOINTR) == 0) {
		/* If interrupts were not disabled, then the CPU is busy */
		return EBUSY;
	}

	/* Direct interrupts away from the CPU and record the change. */
	cpu_intr_redistribute();
	spc->spc_lastmod = time_second;
	return 0;
}
#else	/* __HAVE_INTR_CONTROL */
int
cpu_setintr(struct cpu_info *ci, bool intr)
{

	return EOPNOTSUPP;
}

u_int
cpu_intr_count(struct cpu_info *ci)
{

	return 0;	/* 0 == "don't know" */
}
#endif	/* __HAVE_INTR_CONTROL */

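/*
 * Illustrative sketch: shielding a CPU from device interrupts follows
 * the same pattern as cpu_setstate(), since cpu_setintr() also asserts
 * cpu_lock.  On ports without __HAVE_INTR_CONTROL this always fails
 * with EOPNOTSUPP:
 *
 *	mutex_enter(&cpu_lock);
 *	error = cpu_setintr(ci, false);  // EBUSY if last intr-enabled CPU
 *	mutex_exit(&cpu_lock);
 */
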
bool
cpu_softintr_p(void)
{

	return (curlwp->l_pflag & LP_INTR) != 0;
}