/*	$NetBSD: kern_synch.c,v 1.217 2008/02/14 14:26:57 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.217 2008/02/14 14:26:57 ad Exp $");

#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/cpu.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall_stats.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/lwpctl.h>
#include <sys/atomic.h>
#include <sys/simplelock.h>

#include <uvm/uvm_extern.h>

callout_t sched_pstats_ch;
unsigned int sched_pstats_ticks;

kcondvar_t	lbolt;			/* once a second sleep address */

static void	sched_unsleep(struct lwp *);
static void	sched_changepri(struct lwp *, pri_t);
static void	sched_lendpri(struct lwp *, pri_t);

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri,
	sched_lendpri,
	syncobj_noowner,
};

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal (return EINTR).
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be reacquired before returning to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);

	if (interlock != NULL) {
		KASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	error = sleepq_block(timo, priority & PCATCH);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}

int
mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
		return 0;
	}

	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
	mutex_exit(mtx);
	error = sleepq_block(timo, priority & PCATCH);

	if ((priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}
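
/*
 * Usage sketch (hypothetical names): a caller of mtsleep() typically
 * waits in a loop on an identifier while holding the mutex that guards
 * the condition, re-checking the condition after each wakeup.  "sc",
 * "sc_lock" and "sc_ready" below are illustrative only; the matching
 * wake-up side is sketched after wakeup_one() below.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_ready) {
 *		error = mtsleep(&sc->sc_ready, PCATCH, "scready", 0,
 *		    &sc->sc_lock);
 *		if (error != 0)
 *			break;
 *	}
 *	mutex_exit(&sc->sc_lock);
 */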

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
	error = sleepq_block(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
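
/*
 * Usage sketch (illustrative values): kpause() suits fixed delays where
 * no wakeup() is expected, e.g. backing off briefly before retrying an
 * operation.  Sleeping for roughly a tenth of a second, not interruptible
 * by signals:
 *
 *	(void)kpause("backoff", false, hz / 10, NULL);
 */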

/*
 * OBSOLETE INTERFACE
 *
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}
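
/*
 * Usage sketch, continuing the hypothetical mtsleep() example above:
 * the waking side sets the condition with the same mutex held and then
 * calls wakeup() - or wakeup_one() when only a single waiter can make
 * progress - on the identifier that was slept on.
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_ready = true;
 *	mutex_exit(&sc->sc_lock);
 *	wakeup(&sc->sc_ready);
 */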

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2)).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_kpriority = false;
	if (l->l_class == SCHED_OTHER) {
		/*
		 * Only for timeshared threads.  It will be reset
		 * by the scheduler in due course.
		 */
		l->l_priority = 0;
	}
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_kpriority = false;
	l->l_nivcsw++;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * Compute the amount of time during which the current lwp was running.
 *
 * - update l_rtime unless it's an idle lwp.
 */

void
updatertime(lwp_t *l, const struct bintime *now)
{

	if ((l->l_flag & LW_IDLE) != 0)
		return;

	/* rtime += now - stime */
	bintime_add(&l->l_rtime, now);
	bintime_sub(&l->l_rtime, &l->l_stime);
}

/*
 * The machine independent parts of context switch.
 *
 * Returns 1 if another LWP was actually run.
 */
int
mi_switch(lwp_t *l)
{
	struct cpu_info *ci, *tci = NULL;
	struct schedstate_percpu *spc;
	struct lwp *newl;
	int retval, oldspl;
	struct bintime bt;
	bool returning;

	KASSERT(lwp_locked(l, NULL));
	LOCKDEBUG_BARRIER(l->l_mutex, 1);

#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	binuptime(&bt);

	KDASSERT(l->l_cpu == curcpu());
	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	returning = false;
	newl = NULL;

	/*
	 * If we have been asked to switch to a specific LWP, then there
	 * is no need to inspect the run queues.  If a soft interrupt is
	 * blocking, then return to the interrupted thread without adjusting
	 * VM context or its start time: neither have been changed in order
	 * to take the interrupt.
	 */
	if (l->l_switchto != NULL) {
		if ((l->l_pflag & LP_INTR) != 0) {
			returning = true;
			softint_block(l);
			if ((l->l_flag & LW_TIMEINTR) != 0)
				updatertime(l, &bt);
		}
		newl = l->l_switchto;
		l->l_switchto = NULL;
	}
#ifndef __HAVE_FAST_SOFTINTS
	else if (ci->ci_data.cpu_softints != 0) {
		/* There are pending soft interrupts, so pick one. */
		newl = softint_picklwp();
		newl->l_stat = LSONPROC;
		newl->l_flag |= LW_RUNNING;
	}
#endif	/* !__HAVE_FAST_SOFTINTS */

	/* Count time spent in current system call */
	if (!returning) {
		SYSCALL_TIME_SLEEP(l);

		/*
		 * XXXSMP If we are using h/w performance counters,
		 * save context.
		 */
#if PERFCTRS
		if (PMC_ENABLED(l->l_proc)) {
			pmc_save_context(l->l_proc);
		}
#endif
		updatertime(l, &bt);
	}

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC && (l->l_target_cpu || l != newl)) {
		KASSERT(lwp_locked(l, spc->spc_lwplock));

		tci = l->l_target_cpu;
		if (__predict_false(tci != NULL)) {
			/* Double-lock the runqueues */
			spc_dlock(ci, tci);
		} else {
			/* Lock the runqueue */
			spc_lock(ci);
		}

		if ((l->l_flag & LW_IDLE) == 0) {
			l->l_stat = LSRUN;
			if (__predict_false(tci != NULL)) {
				/*
				 * Set the new CPU and lock, and clear
				 * l_target_cpu - the thread will be
				 * enqueued on the target CPU's runqueue.
				 */
				l->l_cpu = tci;
				lwp_setlock(l, tci->ci_schedstate.spc_mutex);
				l->l_target_cpu = NULL;
			} else {
				lwp_setlock(l, spc->spc_mutex);
			}
			sched_enqueue(l, true);
		} else {
			KASSERT(tci == NULL);
			l->l_stat = LSIDL;
		}
	} else {
		/* Lock the runqueue */
		spc_lock(ci);
	}

	/*
	 * Let sched_nextlwp() select the LWP to run the CPU next.
	 * If no LWP is runnable, select the idle LWP.
	 *
	 * Note that spc_lwplock might not necessarily be held, and
	 * that the new thread will be unlocked after its LWP lock
	 * has been set.
	 */
	if (newl == NULL) {
		newl = sched_nextlwp();
		if (newl != NULL) {
			sched_dequeue(newl);
			KASSERT(lwp_locked(newl, spc->spc_mutex));
			newl->l_stat = LSONPROC;
			newl->l_cpu = ci;
			newl->l_flag |= LW_RUNNING;
			lwp_setlock(newl, spc->spc_lwplock);
		} else {
			newl = ci->ci_data.cpu_idlelwp;
			newl->l_stat = LSONPROC;
			newl->l_flag |= LW_RUNNING;
		}
		/*
		 * Only clear want_resched if there are no
		 * pending (slow) software interrupts.
		 */
		ci->ci_want_resched = ci->ci_data.cpu_softints;
		spc->spc_flags &= ~SPCF_SWITCHCLEAR;
		spc->spc_curpriority = lwp_eprio(newl);
	}

	/* Items that must be updated with the CPU locked. */
	if (!returning) {
		/* Update the new LWP's start time. */
		newl->l_stime = bt;

		/*
		 * ci_curlwp changes when a fast soft interrupt occurs.
		 * We use cpu_onproc to keep track of which kernel or
		 * user thread is running 'underneath' the software
		 * interrupt.  This is important for time accounting,
		 * itimers and forcing user threads to preempt (aston).
		 */
		ci->ci_data.cpu_onproc = newl;
	}

	if (l != newl) {
		struct lwp *prevlwp;

		/* Release all locks, but leave the current LWP locked */
		if (l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex) {
			/*
			 * In case of migration, drop the local runqueue
			 * lock - the thread is on the other runqueue now.
			 */
			if (__predict_false(tci != NULL))
				spc_unlock(ci);
			/*
			 * Drop spc_lwplock, if the current LWP has been moved
			 * to the run queue (it is now locked by spc_mutex).
			 */
			mutex_spin_exit(spc->spc_lwplock);
		} else {
			/*
			 * Otherwise, drop the spc_mutex, we are done with the
			 * run queues.
			 */
			mutex_spin_exit(spc->spc_mutex);
			KASSERT(tci == NULL);
		}

		/*
		 * Mark that a context switch is going to be performed
		 * for this LWP, to protect it from being switched
		 * to on another CPU.
		 */
		KASSERT(l->l_ctxswtch == 0);
		l->l_ctxswtch = 1;
		l->l_ncsw++;
		l->l_flag &= ~LW_RUNNING;

		/*
		 * Increase the count of spin-mutexes before the release
		 * of the last lock - we must remain at IPL_SCHED during
		 * the context switch.
		 */
		oldspl = MUTEX_SPIN_OLDSPL(ci);
		ci->ci_mtx_count--;
		lwp_unlock(l);

		/* Unlocked, but for statistics only. */
		uvmexp.swtch++;

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL)
			l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;

		/*
		 * Save old VM context, unless a soft interrupt
		 * handler is blocking.
		 */
		if (!returning)
			pmap_deactivate(l);

		/*
		 * We may need to spin-wait if 'newl' is still
		 * context switching on another CPU.
		 */
		if (newl->l_ctxswtch != 0) {
			u_int count;
			count = SPINLOCK_BACKOFF_MIN;
			while (newl->l_ctxswtch)
				SPINLOCK_BACKOFF(count);
		}

		/* Switch to the new LWP.. */
		prevlwp = cpu_switchto(l, newl, returning);
		ci = curcpu();

		/*
		 * Switched away - we have new curlwp.
		 * Restore VM context and IPL.
		 */
		pmap_activate(l);
		if (prevlwp != NULL) {
			/* Normalize the count of the spin-mutexes */
			ci->ci_mtx_count++;
			/* Unmark the state of context switch */
			membar_exit();
			prevlwp->l_ctxswtch = 0;
		}
		splx(oldspl);

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL)
			l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);

		retval = 1;
	} else {
		/* Nothing to do - just unlock and return. */
		KASSERT(tci == NULL);
		spc_unlock(ci);
		lwp_unlock(l);
		retval = 0;
	}

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC);
	KASSERT(l->l_cpu == ci);

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_restore_context(l->l_proc);
	}
#endif
	SYSCALL_TIME_WAKEUP(l);
	LOCKDEBUG_BARRIER(NULL, 1);

	return retval;
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct cpu_info *ci;
	sigset_t *ss;

	KASSERT((l->l_flag & LW_IDLE) == 0);
	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
				ss = &l->l_sigpend.sp_set;
			else
				ss = &p->p_sigpend.sp_set;
			sigaddset(ss, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~LW_WSUSPEND;
		p->p_nrlwps++;
		cv_broadcast(&p->p_lwpcv);
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l);
		return;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_flag & LW_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Look for a CPU to run on, and set the LWP runnable.
	 */
	ci = sched_takecpu(l);
	l->l_cpu = ci;
	if (l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex) {
		lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
		lwp_lock(l);
	}
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	/*
	 * If the thread is swapped out, wake the swapper to bring it back in.
	 * Otherwise, enter it into a run queue.
	 */
	if (l->l_flag & LW_INMEM) {
		sched_enqueue(l, false);
		resched_cpu(l);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		uvm_kick_scheduler();
	}
}

/*
 * suspendsched:
 *
 *	Convert all non-PK_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_lock);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & PK_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set LW_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user/kernel
			 * boundary, so that they will release any locks
			 * that they hold.
			 */
			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & LW_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_lock);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	for (CPU_INFO_FOREACH(cii, ci)) {
		spc_lock(ci);
		cpu_need_resched(ci, RESCHED_IMMED);
		spc_unlock(ci);
	}
}

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
static void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

void
resched_cpu(struct lwp *l)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 */
	ci = l->l_cpu;
	if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci, 0);
}

static void
sched_changepri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
		sched_dequeue(l);
		l->l_priority = pri;
		sched_enqueue(l, false);
	} else {
		l->l_priority = pri;
	}
	resched_cpu(l);
}

static void
sched_lendpri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
		sched_dequeue(l);
		l->l_inheritedprio = pri;
		sched_enqueue(l, false);
	} else {
		l->l_inheritedprio = pri;
	}
	resched_cpu(l);
}

struct lwp *
syncobj_noowner(wchan_t wchan)
{

	return NULL;
}


/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	(FSHIFT + 1)
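
/*
 * Worked example for the figures above: one update scales the value by
 * ccpu = exp(-1/20) ~= 0.9512, so after 60 one-second updates the overall
 * scale factor is exp(-1/20)^60 = exp(-3) ~= 0.0498.  Roughly 5% of the
 * old value survives, i.e. about 95% has decayed, matching the comment at
 * the definition of `ccpu' above.
 */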

/*
 * sched_pstats:
 *
 *	Update process statistics and check CPU resource allocation.
 *	Call scheduler-specific hook to eventually adjust process/LWP
 *	priorities.
 */
/* ARGSUSED */
void
sched_pstats(void *arg)
{
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int sig, clkhz;
	long runtm;

	sched_pstats_ticks++;

	mutex_enter(&proclist_lock);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		mutex_enter(&p->p_smutex);
		mutex_spin_enter(&p->p_stmutex);
		runtm = p->p_rtime.sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if ((l->l_flag & LW_IDLE) != 0)
				continue;
			lwp_lock(l);
			runtm += l->l_rtime.sec;
			l->l_swtime++;
			sched_pstats_hook(l);
			lwp_unlock(l);

			/*
			 * p_pctcpu is only for ps.
			 */
			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
			if (l->l_slptime < 1) {
				clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
				l->l_pctcpu += (clkhz == 100) ?
				    ((fixpt_t)l->l_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) p->p_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
				l->l_pctcpu += ((FSCALE - ccpu) *
				    (l->l_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
				l->l_cpticks = 0;
			}
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		mutex_spin_exit(&p->p_stmutex);

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}
		mutex_exit(&p->p_smutex);
		if (sig) {
			mutex_enter(&proclist_mutex);
			psignal(p, sig);
			mutex_exit(&proclist_mutex);
		}
	}
	mutex_exit(&proclist_lock);
	uvm_meter();
	cv_wakeup(&lbolt);
	callout_schedule(&sched_pstats_ch, hz);
}

void
sched_init(void)
{

	cv_init(&lbolt, "lbolt");
	callout_init(&sched_pstats_ch, CALLOUT_MPSAFE);
	callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);
	sched_setup();
	sched_pstats(NULL);
}