/*	$NetBSD: kern_synch.c,v 1.187.2.1 2007/07/11 20:09:56 mjf Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.187.2.1 2007/07/11 20:09:56 mjf Exp $");

#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/cpu.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall_stats.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/evcnt.h>

#include <uvm/uvm_extern.h>

callout_t sched_pstats_ch;
unsigned int sched_pstats_ticks;

kcondvar_t	lbolt;			/* once a second sleep address */

static void	sched_unsleep(struct lwp *);
static void	sched_changepri(struct lwp *, pri_t);
static void	sched_lendpri(struct lwp *, pri_t);
void		resched_cpu(struct lwp *);

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri,
	sched_lendpri,
	syncobj_noowner,
};

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning to the caller unless the PNORELOCK flag is
 * specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, priority & PRIMASK, ident, wmesg, &sleep_syncobj);

	if (interlock != NULL) {
		LOCK_ASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	error = sleepq_block(timo, priority & PCATCH);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}
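
/*
 * Illustrative sketch only (not compiled): a typical ltsleep() consumer
 * waiting on a flag protected by a simplelock-style interlock.  The
 * "example_softc" structure, its sc_slock/sc_busy members and the "exbusy"
 * wait message are hypothetical names invented for this sketch.
 */
#if 0
static int
example_wait(struct example_softc *sc)
{
	int error;

	simple_lock(&sc->sc_slock);
	while (sc->sc_busy) {
		/* The interlock is dropped while asleep, retaken on wake. */
		error = ltsleep(&sc->sc_busy, PRIBIO | PCATCH, "exbusy", 0,
		    &sc->sc_slock);
		if (error != 0) {
			/* EINTR or ERESTART: a signal arrived. */
			simple_unlock(&sc->sc_slock);
			return error;
		}
	}
	sc->sc_busy = 1;
	simple_unlock(&sc->sc_slock);
	return 0;
}
#endif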

int
mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, priority & PRIMASK, ident, wmesg, &sleep_syncobj);
	mutex_exit(mtx);
	error = sleepq_block(timo, priority & PCATCH);

	if ((priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}
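
/*
 * Illustrative sketch only (not compiled): the same wait expressed with
 * mtsleep() and a kmutex.  As above, the softc and its sc_mtx/sc_busy
 * members are hypothetical.  Note how the mutex is dropped across the
 * sleep and, without PNORELOCK, reacquired before mtsleep() returns.
 */
#if 0
static int
example_wait_mtx(struct example_softc *sc)
{
	int error;

	mutex_enter(&sc->sc_mtx);
	while (sc->sc_busy) {
		error = mtsleep(&sc->sc_busy, PRIBIO | PCATCH, "exbusy", 0,
		    &sc->sc_mtx);
		if (error != 0) {
			mutex_exit(&sc->sc_mtx);
			return error;
		}
	}
	sc->sc_busy = 1;
	mutex_exit(&sc->sc_mtx);
	return 0;
}
#endif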

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, sched_kpri(l), l, wmesg, &sleep_syncobj);
	error = sleepq_block(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
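
/*
 * Illustrative sketch only (not compiled): kpause() is for pure delays
 * where no wakeup(9) is expected, e.g. backing off before retrying after
 * a transient failure.  The "exretry" wait message is a hypothetical name;
 * a held mutex may be passed so it is dropped and retaken around the sleep.
 */
#if 0
	/* Pause for roughly a tenth of a second, not interruptible. */
	(void)kpause("exretry", false, hz / 10, NULL);
#endif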

/*
 * OBSOLETE INTERFACE
 *
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}
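
/*
 * Illustrative sketch only (not compiled): the producer side matching the
 * ltsleep()/mtsleep() sketches above.  The condition is changed under the
 * same lock the sleeper uses before waking; the hypothetical sc_busy field
 * doubles as the wait channel.
 */
#if 0
	mutex_enter(&sc->sc_mtx);
	sc->sc_busy = 0;
	mutex_exit(&sc->sc_mtx);
	wakeup(&sc->sc_busy);	/* or wakeup_one() to rouse a single waiter */
#endif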

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (eg sched_yield(2) in compat code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_priority = l->l_usrpri;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_priority = l->l_usrpri;
	l->l_nivcsw++;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * Compute the amount of time during which the current lwp was running.
 *
 * - update l_rtime unless it's an idle lwp.
 * - update spc_runtime for the next lwp.
 */

static inline void
updatertime(struct lwp *l, struct schedstate_percpu *spc)
{
	struct timeval tv;
	long s, u;

	if ((l->l_flag & LW_IDLE) != 0) {
		microtime(&spc->spc_runtime);
		return;
	}

	microtime(&tv);
	u = l->l_rtime.tv_usec + (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;

	spc->spc_runtime = tv;
}

/*
 * The machine independent parts of context switch.
 *
 * Returns 1 if another LWP was actually run.
 */
int
mi_switch(struct lwp *l)
{
	struct schedstate_percpu *spc;
	struct lwp *newl;
	int retval, oldspl;

	KASSERT(lwp_locked(l, NULL));
	LOCKDEBUG_BARRIER(l->l_mutex, 1);

#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * It's safe to read the per CPU schedstate unlocked here, as all we
	 * are after is the run time and that's guaranteed to have been last
	 * updated by this CPU.
	 */
	KDASSERT(l->l_cpu == curcpu());

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc = &l->l_cpu->ci_schedstate;
	newl = NULL;

	if (l->l_switchto != NULL) {
		newl = l->l_switchto;
		l->l_switchto = NULL;
	}

	/* Count time spent in current system call */
	SYSCALL_TIME_SLEEP(l);

	/*
	 * XXXSMP If we are using h/w performance counters,
	 * save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_save_context(l->l_proc);
	}
#endif
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;
	updatertime(l, spc);

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	mutex_spin_enter(spc->spc_mutex);
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &spc->spc_lwplock));
		if ((l->l_flag & LW_IDLE) == 0) {
			l->l_stat = LSRUN;
			lwp_setlock(l, spc->spc_mutex);
			sched_enqueue(l, true);
		} else
			l->l_stat = LSIDL;
	}

	/*
	 * Let sched_nextlwp() select the LWP to run on the CPU next.
	 * If no LWP is runnable, switch to the idle LWP.
	 */
	if (newl == NULL) {
		newl = sched_nextlwp();
		if (newl != NULL) {
			sched_dequeue(newl);
			KASSERT(lwp_locked(newl, spc->spc_mutex));
			newl->l_stat = LSONPROC;
			newl->l_cpu = l->l_cpu;
			newl->l_flag |= LW_RUNNING;
			lwp_setlock(newl, &spc->spc_lwplock);
		} else {
			newl = l->l_cpu->ci_data.cpu_idlelwp;
			newl->l_stat = LSONPROC;
			newl->l_flag |= LW_RUNNING;
		}
		spc->spc_curpriority = newl->l_usrpri;
		newl->l_priority = newl->l_usrpri;
		cpu_did_resched();
	}

	if (l != newl) {
		struct lwp *prevlwp;

		/*
		 * If the old LWP has been moved to a run queue above,
		 * drop the general purpose LWP lock: it's now locked
		 * by the scheduler lock.
		 *
		 * Otherwise, drop the scheduler lock.  We're done with
		 * the run queues for now.
		 */
		if (l->l_mutex == spc->spc_mutex) {
			mutex_spin_exit(&spc->spc_lwplock);
		} else {
			mutex_spin_exit(spc->spc_mutex);
		}

		/* Unlocked, but for statistics only. */
		uvmexp.swtch++;

		/* Save old VM context. */
		pmap_deactivate(l);

		/* Switch to the new LWP.. */
		l->l_ncsw++;
		l->l_flag &= ~LW_RUNNING;
		oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);
		prevlwp = cpu_switchto(l, newl);

		/*
		 * .. we have switched away and are now back so we must
		 * be the new curlwp.  prevlwp is who we replaced.
		 */
		curlwp = l;
		if (prevlwp != NULL) {
			curcpu()->ci_mtx_oldspl = oldspl;
			lwp_unlock(prevlwp);
		} else {
			splx(oldspl);
		}

		/* Restore VM context. */
		pmap_activate(l);
		retval = 1;
	} else {
		/* Nothing to do - just unlock and return. */
		mutex_spin_exit(spc->spc_mutex);
		lwp_unlock(l);
		retval = 0;
	}

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC);

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_restore_context(l->l_proc);
	}
#endif

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	SYSCALL_TIME_WAKEUP(l);
	KDASSERT(l->l_cpu == curcpu());
	LOCKDEBUG_BARRIER(NULL, 1);

	return retval;
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	sigset_t *ss;

	KASSERT((l->l_flag & LW_IDLE) == 0);
	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT(lwp_locked(l, NULL));

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
				ss = &l->l_sigpend.sp_set;
			else
				ss = &p->p_sigpend.sp_set;
			sigaddset(ss, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~LW_WSUSPEND;
		p->p_nrlwps++;
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l);
		return;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_flag & LW_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Set the LWP runnable.  If it's swapped out, we need to wake the
	 * swapper to bring it back in.  Otherwise, enter it into a run queue.
	 */
	if (l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex) {
		spc_lock(l->l_cpu);
		lwp_unlock_to(l, l->l_cpu->ci_schedstate.spc_mutex);
	}

	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	if (l->l_flag & LW_INMEM) {
		sched_enqueue(l, false);
		resched_cpu(l);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		uvm_kick_scheduler();
	}
}

/*
 * suspendsched:
 *
 *	Convert all non-PK_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
#ifdef MULTIPROCESSOR
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
#endif
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & PK_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set LW_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user/kernel
			 * boundary, so that they will release any locks
			 * that they hold.
			 */
			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & LW_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_mutex);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
#ifdef MULTIPROCESSOR
	for (CPU_INFO_FOREACH(cii, ci))
		cpu_need_resched(ci, 0);
#else
	cpu_need_resched(curcpu(), 0);
#endif
}

/*
 * sched_kpri:
 *
 *	Scale a priority level to a kernel priority level, usually
 *	for an LWP that is about to sleep.
 */
pri_t
sched_kpri(struct lwp *l)
{
	/*
	 * Scale user priorities (127 -> 50) up to kernel priorities
	 * in the range (49 -> 8).  Reserve the top 8 kernel priorities
	 * for high priority kthreads.  Kernel priorities passed in
	 * are left "as is".  XXX This is somewhat arbitrary.
	 */
	static const uint8_t kpri_tab[] = {
		 0,   1,   2,   3,   4,   5,   6,   7,
		 8,   9,  10,  11,  12,  13,  14,  15,
		16,  17,  18,  19,  20,  21,  22,  23,
		24,  25,  26,  27,  28,  29,  30,  31,
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,   8,   8,   9,   9,  10,  10,
		11,  11,  12,  12,  13,  14,  14,  15,
		15,  16,  16,  17,  17,  18,  18,  19,
		20,  20,  21,  21,  22,  22,  23,  23,
		24,  24,  25,  26,  26,  27,  27,  28,
		28,  29,  29,  30,  30,  31,  32,  32,
		33,  33,  34,  34,  35,  35,  36,  36,
		37,  38,  38,  39,  39,  40,  40,  41,
		41,  42,  42,  43,  44,  44,  45,  45,
		46,  46,  47,  47,  48,  48,  49,  49,
	};

	return (pri_t)kpri_tab[l->l_usrpri];
}
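
/*
 * Worked examples of the mapping above: an LWP at the weakest user
 * priority (l_usrpri 127) maps to kernel priority 49, one at l_usrpri 50
 * maps to 8, and anything already in the kernel range (0 -> 49) is
 * returned unchanged, e.g. sched_kpri() of an LWP at priority 10 is 10.
 */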

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
static void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

inline void
resched_cpu(struct lwp *l)
{
	struct cpu_info *ci;
	const pri_t pri = lwp_eprio(l);

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci, 0);
}

static void
sched_changepri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	l->l_usrpri = pri;
	if (l->l_priority < PUSER)
		return;

	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
		l->l_priority = pri;
		return;
	}

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	sched_dequeue(l);
	l->l_priority = pri;
	sched_enqueue(l, false);
	resched_cpu(l);
}

static void
sched_lendpri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
		l->l_inheritedprio = pri;
		return;
	}

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	sched_dequeue(l);
	l->l_inheritedprio = pri;
	sched_enqueue(l, false);
	resched_cpu(l);
}

struct lwp *
syncobj_noowner(wchan_t wchan)
{

	return NULL;
}

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	(FSHIFT + 1)
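
/*
 * Worked example of the decay (assuming the usual FSHIFT of 11, so
 * FSCALE == 2048): ccpu == exp(-1/20) * 2048 ~= 1948.  An LWP that stops
 * running with l_pctcpu == FSCALE ("100%") decays once per second as
 *
 *	l_pctcpu = (l_pctcpu * 1948) >> 11
 *
 * i.e. 2048 -> 1948 -> 1852 -> ..., reaching about 5% of its starting
 * value after 60 iterations, which is the "decay 95% in 60 seconds"
 * quoted above.
 */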

/*
 * sched_pstats:
 *
 *	Update process statistics and check CPU resource allocation.
 *	Call scheduler-specific hook to eventually adjust process/LWP
 *	priorities.
 *
 *	XXXSMP This needs to be reorganised in order to reduce the locking
 *	burden.
 */
/* ARGSUSED */
void
sched_pstats(void *arg)
{
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int minslp, sig, clkhz;
	long runtm;

	sched_pstats_ticks++;

	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		mutex_enter(&p->p_smutex);
		mutex_spin_enter(&p->p_stmutex);
		runtm = p->p_rtime.tv_sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if ((l->l_flag & LW_IDLE) != 0)
				continue;
			lwp_lock(l);
			runtm += l->l_rtime.tv_sec;
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
			lwp_unlock(l);

			/*
			 * l_pctcpu is only for ps.
			 */
			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
			if (l->l_slptime < 1) {
				clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
				l->l_pctcpu += (clkhz == 100) ?
				    ((fixpt_t)l->l_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t)l->l_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
				l->l_pctcpu += ((FSCALE - ccpu) *
				    (l->l_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
				l->l_cpticks = 0;
			}
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		sched_pstats_hook(p, minslp);
		mutex_spin_exit(&p->p_stmutex);

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}
		mutex_exit(&p->p_smutex);
		if (sig) {
			psignal(p, sig);
		}
	}
	mutex_exit(&proclist_mutex);
	uvm_meter();
	cv_broadcast(&lbolt);
	callout_schedule(&sched_pstats_ch, hz);
}

void
sched_init(void)
{

	cv_init(&lbolt, "lbolt");
	callout_init(&sched_pstats_ch, 0);
	callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);
	sched_setup();
	sched_pstats(NULL);
}