/*	$NetBSD: kern_synch.c,v 1.186.2.8 2007/06/17 21:31:28 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.186.2.8 2007/06/17 21:31:28 ad Exp $");

#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/cpu.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall_stats.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

struct callout sched_pstats_ch = CALLOUT_INITIALIZER_SETFUNC(sched_pstats, NULL);
unsigned int sched_pstats_ticks;

kcondvar_t	lbolt;			/* once a second sleep address */

static void sched_unsleep(struct lwp *);
static void sched_changepri(struct lwp *, pri_t);
static void sched_lendpri(struct lwp *, pri_t);

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri,
	sched_lendpri,
	syncobj_noowner,
};

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping; otherwise they are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, sched_kpri(l), ident, wmesg, &sleep_syncobj);

	if (interlock != NULL) {
		KASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	error = sleepq_block(timo, priority & PCATCH);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}
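
/*
 * Example usage (a sketch only; "sc" and its members are hypothetical and
 * not part of this file): waiting on a condition protected by a
 * simplelock, with a matching wakeup(&sc->sc_busy) performed elsewhere:
 *
 *	simple_lock(&sc->sc_slock);
 *	while (sc->sc_busy) {
 *		error = ltsleep(&sc->sc_busy, PWAIT | PCATCH, "scbusy",
 *		    0, &sc->sc_slock);
 *		if (error != 0)
 *			break;
 *	}
 *	simple_unlock(&sc->sc_slock);
 */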

int
mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, sched_kpri(l), ident, wmesg, &sleep_syncobj);
	mutex_exit(mtx);
	error = sleepq_block(timo, priority & PCATCH);

	if ((priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}
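
/*
 * Example usage (a sketch; the same hypothetical "sc" as above, now with
 * a kmutex_t): the mutex is dropped while asleep and, unless PNORELOCK
 * is given, reacquired before mtsleep() returns:
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (sc->sc_busy) {
 *		error = mtsleep(&sc->sc_busy, PWAIT | PCATCH, "scbusy",
 *		    0, &sc->sc_lock);
 *		if (error != 0)
 *			break;
 *	}
 *	mutex_exit(&sc->sc_lock);
 */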

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, sched_kpri(l), l, wmesg, &sleep_syncobj);
	error = sleepq_block(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
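
/*
 * Example usage (a sketch): pause for roughly 100ms.  Sleeping on the
 * LWP's own address means no wakeup() can match it, so only the timeout
 * (or a signal, if intr is true) ends the sleep.  mstohz() is assumed
 * here from <sys/param.h>; hz / 10 would do as well:
 *
 *	(void)kpause("pollwait", false, mstohz(100), NULL);
 */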

/*
 * OBSOLETE INTERFACE
 *
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}


/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat
 * code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_priority = l->l_usrpri;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_priority = l->l_usrpri;
	l->l_nivcsw++;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * Compute the amount of time during which the current lwp was running.
 *
 * - update l_rtime unless it's an idle lwp.
 * - update spc_runtime for the next lwp.
 */

static inline void
updatertime(struct lwp *l, struct schedstate_percpu *spc)
{
	struct timeval tv;
	long s, u;

	if ((l->l_flag & LW_IDLE) != 0) {
		microtime(&spc->spc_runtime);
		return;
	}

	microtime(&tv);
	u = l->l_rtime.tv_usec + (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;

	spc->spc_runtime = tv;
}
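
/*
 * Worked example of the normalization above (illustrative values only):
 * with l_rtime = { 5, 950000 } and an elapsed interval of 0.08s,
 * u = 950000 + 80000 = 1030000 >= 1000000, so the carry leaves
 * l_rtime = { 6, 30000 }.
 */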

/*
 * The machine independent parts of context switch.
 *
 * Returns 1 if another LWP was actually run.
 */
int
mi_switch(struct lwp *l)
{
	struct schedstate_percpu *spc;
	struct lwp *newl;
	int retval, oldspl;
	bool returning;

	KASSERT(lwp_locked(l, NULL));
	LOCKDEBUG_BARRIER(l->l_mutex, 1);

#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * It's safe to read the per CPU schedstate unlocked here, as all we
	 * are after is the run time and that's guaranteed to have been last
	 * updated by this CPU.
	 */
	KDASSERT(l->l_cpu == curcpu());

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc = &l->l_cpu->ci_schedstate;
	if (l->l_pinned != NULL) {
		returning = true;
		newl = l->l_pinned;
		l->l_pinned = NULL;
	} else {
		returning = false;
		newl = NULL;

		/* Count time spent in current system call */
		SYSCALL_TIME_SLEEP(l);

		/*
		 * XXXSMP If we are using h/w performance counters,
		 * save context.
		 */
#if PERFCTRS
		if (PMC_ENABLED(l->l_proc)) {
			pmc_save_context(l->l_proc);
		}
#endif
		spc->spc_flags &= ~SPCF_SWITCHCLEAR;
		updatertime(l, spc);
	}

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	mutex_spin_enter(spc->spc_mutex);
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &spc->spc_lwplock));
		if ((l->l_flag & LW_IDLE) == 0) {
			l->l_stat = LSRUN;
			lwp_setlock(l, spc->spc_mutex);
			sched_enqueue(l, true);
		} else
			l->l_stat = LSIDL;
	}

	/*
	 * Let sched_nextlwp() select the LWP to run on the CPU next.
	 * If no LWP is runnable, switch to the idle LWP.
	 */
	if (!returning) {
		newl = sched_nextlwp();
		if (newl) {
			sched_dequeue(newl);
			KASSERT(lwp_locked(newl, spc->spc_mutex));
			newl->l_stat = LSONPROC;
			newl->l_cpu = l->l_cpu;
			newl->l_flag |= LW_RUNNING;
			lwp_setlock(newl, &spc->spc_lwplock);
		} else {
			newl = l->l_cpu->ci_data.cpu_idlelwp;
			newl->l_stat = LSONPROC;
			newl->l_flag |= LW_RUNNING;
		}
		spc->spc_curpriority = newl->l_usrpri;
		newl->l_priority = newl->l_usrpri;
		cpu_did_resched();
	}

	if (l != newl) {
		struct lwp *prevlwp;

		/*
		 * If the old LWP has been moved to a run queue above,
		 * drop the general purpose LWP lock: it's now locked
		 * by the scheduler lock.
		 *
		 * Otherwise, drop the scheduler lock.  We're done with
		 * the run queues for now.
		 */
		if (l->l_mutex == spc->spc_mutex) {
			mutex_spin_exit(&spc->spc_lwplock);
		} else {
			mutex_spin_exit(spc->spc_mutex);
		}

		/* Unlocked, but for statistics only. */
		uvmexp.swtch++;

		/*
		 * Save old VM context, unless a soft interrupt
		 * handler is blocking.
		 */
		if (!returning)
			pmap_deactivate(l);

		/* Switch to the new LWP.. */
		l->l_ncsw++;
		l->l_flag &= ~LW_RUNNING;
		oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);
		prevlwp = cpu_switchto(l, newl, returning);

		/*
		 * .. we have switched away and are now back so we must
		 * be the new curlwp.  prevlwp is who we replaced.
		 */
		curlwp = l;
		if (prevlwp != NULL) {
			curcpu()->ci_mtx_oldspl = oldspl;
			lwp_unlock(prevlwp);
		} else {
			splx(oldspl);
		}

		/* Restore VM context. */
		pmap_activate(l);
		retval = 1;
	} else {
		/* Nothing to do - just unlock and return. */
		mutex_spin_exit(spc->spc_mutex);
		lwp_unlock(l);
		retval = 0;
	}

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC);

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_restore_context(l->l_proc);
	}
#endif

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	SYSCALL_TIME_WAKEUP(l);
	KDASSERT(l->l_cpu == curcpu());
	LOCKDEBUG_BARRIER(NULL, 1);

	return retval;
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	sigset_t *ss;

	KASSERT((l->l_flag & LW_IDLE) == 0);
	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT(lwp_locked(l, NULL));

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
				ss = &l->l_sigpend.sp_set;
			else
				ss = &p->p_sigpend.sp_set;
			sigaddset(ss, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~LW_WSUSPEND;
		p->p_nrlwps++;
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l);
		return;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_flag & LW_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Set the LWP runnable.  If it's swapped out, we need to wake the
	 * swapper to bring it back in.  Otherwise, enter it into a run queue.
	 */
	if (l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex) {
		spc_lock(l->l_cpu);
		lwp_unlock_to(l, l->l_cpu->ci_schedstate.spc_mutex);
	}

	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	if (l->l_flag & LW_INMEM) {
		sched_enqueue(l, false);
		resched_cpu(l);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		uvm_kick_scheduler();
	}
}

/*
 * suspendsched:
 *
 *	Convert all non-PK_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
#ifdef MULTIPROCESSOR
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
#endif
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_lock);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & PK_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set LW_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user/kernel
			 * boundary, so that they will release any locks that
			 * they hold.
			 */
			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & LW_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_lock);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
#ifdef MULTIPROCESSOR
	for (CPU_INFO_FOREACH(cii, ci))
		cpu_need_resched(ci, 0);
#else
	cpu_need_resched(curcpu(), 0);
#endif
}

/*
 * sched_kpri:
 *
 *	Scale a priority level to a kernel priority level, usually
 *	for an LWP that is about to sleep.
 */
pri_t
sched_kpri(struct lwp *l)
{
	pri_t pri;

	/*
	 * Scale user priorities (0 -> 63) up to kernel priorities
	 * in the range (64 -> 95).  This makes assumptions about
	 * the priority space and so should be kept in sync with
	 * param.h.
	 */
	if ((pri = l->l_usrpri) >= PRI_KERNEL)
		return pri;

	return (pri >> 1) + PRI_KERNEL;
}
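
/*
 * Worked example of the scaling above: with PRI_KERNEL == 64 (per the
 * param.h assumptions noted in the comment), l_usrpri 0 maps to
 * (0 >> 1) + 64 == 64 and l_usrpri 63 maps to (63 >> 1) + 64 == 95,
 * so the user range 0 -> 63 lands in the kernel range 64 -> 95.
 */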

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
static void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

inline void
resched_cpu(struct lwp *l)
{
	struct cpu_info *ci;
	const pri_t pri = lwp_eprio(l);

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci, 0);
}

static void
sched_changepri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	l->l_usrpri = pri;
	if (l->l_priority >= PRI_KERNEL)
		return;

	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
		l->l_priority = pri;
		return;
	}

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	sched_dequeue(l);
	l->l_priority = pri;
	sched_enqueue(l, false);
	resched_cpu(l);
}

static void
sched_lendpri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
		l->l_inheritedprio = pri;
		return;
	}

	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));

	sched_dequeue(l);
	l->l_inheritedprio = pri;
	sched_enqueue(l, false);
	resched_cpu(l);
}

struct lwp *
syncobj_noowner(wchan_t wchan)
{

	return NULL;
}


/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	(FSHIFT + 1)
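
/*
 * Illustrative arithmetic (assuming FSHIFT == 11, i.e. FSCALE == 2048,
 * matching the "*11* bits" note above): ccpu == 0.95122... * 2048 ~= 1948,
 * and applying the decay once per second for 60 seconds gives
 * exp(-1/20)^60 == exp(-3) ~= 0.05, i.e. about 95% of l_pctcpu decays
 * away over 60 seconds, as the comment above ccpu claims.
 */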

/*
 * sched_pstats:
 *
 *	Update process statistics and check CPU resource allocation.
 *	Call scheduler-specific hook to eventually adjust process/LWP
 *	priorities.
 *
 *	XXXSMP This needs to be reorganised in order to reduce the locking
 *	burden.
 */
/* ARGSUSED */
void
sched_pstats(void *arg)
{
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int minslp, sig, clkhz;
	long runtm;

	sched_pstats_ticks++;

	mutex_enter(&proclist_lock);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		mutex_enter(&p->p_smutex);
		mutex_spin_enter(&p->p_stmutex);
		runtm = p->p_rtime.tv_sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if ((l->l_flag & LW_IDLE) != 0)
				continue;
			lwp_lock(l);
			runtm += l->l_rtime.tv_sec;
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
			lwp_unlock(l);

			/*
			 * p_pctcpu is only for ps.
			 */
			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
			if (l->l_slptime < 1) {
				clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
				l->l_pctcpu += (clkhz == 100) ?
				    ((fixpt_t)l->l_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t)l->l_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
				l->l_pctcpu += ((FSCALE - ccpu) *
				    (l->l_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
				l->l_cpticks = 0;
			}
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		sched_pstats_hook(p, minslp);
		mutex_spin_exit(&p->p_stmutex);

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}
		mutex_exit(&p->p_smutex);
		if (sig) {
			/* XXXAD */
			mutex_enter(&proclist_mutex);
			psignal(p, sig);
			mutex_exit(&proclist_mutex);
		}
	}
	mutex_exit(&proclist_lock);
	uvm_meter();
	cv_broadcast(&lbolt);
	callout_schedule(&sched_pstats_ch, hz);
}