kern_cpu.c revision 1.33
/*	$NetBSD: kern_cpu.c,v 1.33 2008/06/22 13:59:06 ad Exp $	*/

/*-
 * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.33 2008/06/22 13:59:06 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>

#include <uvm/uvm_extern.h>

void	cpuctlattach(int);

static void	cpu_xc_online(struct cpu_info *);
static void	cpu_xc_offline(struct cpu_info *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
	nullopen, nullclose, nullread, nullwrite, cpuctl_ioctl,
	nullstop, notty, nopoll, nommap, nokqfilter,
	D_OTHER | D_MPSAFE
};
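
/*
 * Global CPU state.  cpu_lock serializes changes to the set of attached
 * CPUs and to their online status; ncpu and ncpuonline count the
 * attached and online CPUs respectively.
 */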
kmutex_t cpu_lock;
int	ncpu;
int	ncpuonline;
bool	mp_online;
struct	cpuqueue cpu_queue = CIRCLEQ_HEAD_INITIALIZER(cpu_queue);

static struct cpu_info *cpu_infos[MAXCPUS];
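
/*
 * mi_cpu_attach:
 *
 *	Machine-independent side of CPU attachment: record the CPU in
 *	the global tables, initialize its per-CPU subsystems and create
 *	its idle LWP.  Called once for each CPU, including the boot CPU,
 *	by machine-dependent attachment code.
 */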
int
mi_cpu_attach(struct cpu_info *ci)
{
	int error;

	ci->ci_index = ncpu;
	cpu_infos[cpu_index(ci)] = ci;
	CIRCLEQ_INSERT_TAIL(&cpu_queue, ci, ci_data.cpu_qchain);
	TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
	__cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

	sched_cpuattach(ci);

	error = create_idle_lwp(ci);
	if (error != 0) {
		/* XXX revert sched_cpuattach */
		return error;
	}

	if (ci == curcpu())
		ci->ci_data.cpu_onproc = curlwp;
	else
		ci->ci_data.cpu_onproc = ci->ci_data.cpu_idlelwp;

	percpu_init_cpu(ci);
	softint_init(ci);
	callout_init_cpu(ci);
	xc_init_cpu(ci);
	pool_cache_cpu_init(ci);
	selsysinit(ci);
	cache_cpu_init(ci);
	TAILQ_INIT(&ci->ci_data.cpu_biodone);
	ncpu++;
	ncpuonline++;

	return 0;
}
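
/*
 * cpuctlattach:
 *
 *	Pseudo-device attach routine for the cpuctl device.  There is
 *	no per-device state to initialize, so this is a no-op.
 */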
void
cpuctlattach(int dummy)
{

}
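
/*
 * cpuctl_ioctl:
 *
 *	Handle ioctl requests on the cpuctl device: set or query the
 *	online state of a CPU, map a CPU index to a CPU id, and return
 *	the number of attached CPUs.  All requests are serialized by
 *	cpu_lock.
 *
 *	Illustrative sketch (not part of this file) of how userland
 *	might drive IOC_CPU_SETSTATE, assuming the device node is
 *	/dev/cpuctl; cpuctl(8) is the canonical consumer:
 *
 *		cpustate_t cs = { .cs_id = 1, .cs_online = false,
 *		    .cs_intr = true };
 *		int fd = open("/dev/cpuctl", O_RDWR);
 *		if (fd == -1 || ioctl(fd, IOC_CPU_SETSTATE, &cs) == -1)
 *			err(EXIT_FAILURE, "failed to offline cpu1");
 */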
int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	CPU_INFO_ITERATOR cii;
	cpustate_t *cs;
	struct cpu_info *ci;
	int error, i;
	u_int id;

	error = 0;

	mutex_enter(&cpu_lock);
	switch (cmd) {
	case IOC_CPU_SETSTATE:
		cs = data;
		error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
		    NULL);
		if (error != 0)
			break;
		if ((ci = cpu_lookup(cs->cs_id)) == NULL) {
			error = ESRCH;
			break;
		}
		if (!cs->cs_intr) {
			error = EOPNOTSUPP;
			break;
		}
		error = cpu_setonline(ci, cs->cs_online);
		break;

	case IOC_CPU_GETSTATE:
		cs = data;
		id = cs->cs_id;
		memset(cs, 0, sizeof(*cs));
		cs->cs_id = id;
		if ((ci = cpu_lookup(id)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		cs->cs_intr = true;
		cs->cs_lastmod = ci->ci_schedstate.spc_lastmod;
		break;

	case IOC_CPU_MAPID:
		i = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)
				break;
		}
		if (ci == NULL)
			error = ESRCH;
		else
			*(int *)data = ci->ci_cpuid;
		break;

	case IOC_CPU_GETCOUNT:
		*(int *)data = ncpu;
		break;

	default:
		error = ENOTTY;
		break;
	}
	mutex_exit(&cpu_lock);

	return error;
}
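
/*
 * cpu_lookup:
 *
 *	Find a CPU by machine-dependent CPU id.  Returns NULL if there
 *	is no matching CPU.
 */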
struct cpu_info *
cpu_lookup(cpuid_t id)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_cpuid == id)
			return ci;
	}

	return NULL;
}
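
/*
 * cpu_lookup_byindex:
 *
 *	Find a CPU by the machine-independent index assigned during
 *	mi_cpu_attach().  Returns NULL if no CPU is attached at the
 *	given index.
 */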
struct cpu_info *
cpu_lookup_byindex(u_int idx)
{
	struct cpu_info *ci;

	/* Check the index before using it to subscript cpu_infos[]. */
	KASSERT(idx < MAXCPUS);
	ci = cpu_infos[idx];
	KASSERT(ci == NULL || cpu_index(ci) == idx);

	return ci;
}
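
/*
 * cpu_xc_offline:
 *
 *	Mark a CPU as offline and migrate its runnable, non-bound LWPs
 *	to an online CPU.  Runs on the target CPU via a cross-call
 *	issued by cpu_setonline().
 */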
static void
cpu_xc_offline(struct cpu_info *ci)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *mci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first online CPU as the migration target. */
	for (CPU_INFO_FOREACH(cii, mci)) {
		mspc = &mci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(mci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.
	 *
	 * Note that this runs from the xcall thread, so handling of
	 * LSONPROC is not needed.  Threads which change state will be
	 * handled by sched_takecpu().
	 */
	mutex_enter(proc_lock);
	spc_dlock(ci, mci);
	LIST_FOREACH(l, &alllwp, l_list) {
		lwp_lock(l);
		if (l->l_cpu != ci || (l->l_pflag & LP_BOUND) != 0) {
			lwp_unlock(l);
			continue;
		}
		if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
			sched_dequeue(l);
			l->l_cpu = mci;
			lwp_setlock(l, mspc->spc_mutex);
			sched_enqueue(l, false);
			lwp_unlock(l);
		} else {
			lwp_migrate(l, mci);
		}
	}
	spc_dunlock(ci, mci);
	mutex_exit(proc_lock);

#ifdef __HAVE_MD_CPU_OFFLINE
	cpu_offline_md();
#endif
}
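
/*
 * cpu_xc_online:
 *
 *	Clear the offline flag of a CPU so that the scheduler may use
 *	it again.  Runs on the target CPU via a cross-call issued by
 *	cpu_setonline().
 */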
static void
cpu_xc_online(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}
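
/*
 * cpu_setonline:
 *
 *	Set a CPU online or offline, performing the state change via a
 *	cross-call to the target CPU.  Refuses to offline the last
 *	online CPU in a processor set.  Must be called with cpu_lock
 *	held.
 */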
int
cpu_setonline(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nonline;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (online) {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_online;
		ncpuonline++;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) != 0)
			return 0;
		nonline = 0;
		/*
		 * Ensure that at least one CPU within the processor set
		 * stays online.  Revisit this later.
		 */
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
				continue;
			if (ci2->ci_schedstate.spc_psid != spc->spc_psid)
				continue;
			nonline++;
		}
		if (nonline == 1)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_offline;
		ncpuonline--;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
	} else {
		KASSERT(spc->spc_flags & SPCF_OFFLINE);
	}
	spc->spc_lastmod = time_second;

	return 0;
}
    364