/*	$NetBSD: kern_synch.c,v 1.213 2007/12/27 22:13:19 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.213 2007/12/27 22:13:19 ad Exp $");

#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/cpu.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall_stats.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/lwpctl.h>
#include <sys/atomic.h>

#include <uvm/uvm_extern.h>

callout_t	sched_pstats_ch;
unsigned int	sched_pstats_ticks;

kcondvar_t	lbolt;			/* once a second sleep address */

static void	sched_unsleep(struct lwp *);
static void	sched_changepri(struct lwp *, pri_t);
static void	sched_lendpri(struct lwp *, pri_t);

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri,
	sched_lendpri,
	syncobj_noowner,
};

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, otherwise signals are not checked.  Returns 0
 * if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);

	if (interlock != NULL) {
		KASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	error = sleepq_block(timo, priority & PCATCH);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}

int
mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
		return 0;
	}

	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
	mutex_exit(mtx);
	error = sleepq_block(timo, priority & PCATCH);

	if ((priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}
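
/*
 * Usage sketch (hypothetical caller, not part of this file): a typical
 * consumer holds the mutex, re-checks its condition in a loop and lets
 * mtsleep() drop and re-take the lock around the sleep, while the
 * producer calls wakeup() on the same identifier.  The "sc" structure,
 * field names and wait channel below are illustrative only.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_done) {
 *		error = mtsleep(&sc->sc_done, PRIBIO | PCATCH, "scdone",
 *		    0, &sc->sc_lock);
 *		if (error != 0)
 *			break;
 *	}
 *	mutex_exit(&sc->sc_lock);
 *
 * The waking side sets sc_done under sc_lock and then calls
 * wakeup(&sc->sc_done).
 */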

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
	error = sleepq_block(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
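
/*
 * Usage sketch (hypothetical caller, not part of this file): pause for
 * roughly a tenth of a second, not interruptible by signals, with no
 * mutex to drop:
 *
 *	(void)kpause("pause", false, hz / 10, NULL);
 *
 * The wait channel is the calling LWP itself, so no wakeup() is ever
 * expected; only the timeout ends the pause.
 */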

/*
 * OBSOLETE INTERFACE
 *
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}


/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2)).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_kpriority = false;
	if (l->l_class == SCHED_OTHER) {
		/*
		 * Only for timeshared threads.  It will be reset
		 * by the scheduler in due course.
		 */
		l->l_priority = 0;
	}
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_kpriority = false;
	l->l_nivcsw++;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * Compute the amount of time during which the current lwp was running.
 *
 * - update l_rtime unless it's an idle lwp.
 */

void
updatertime(lwp_t *l, const struct bintime *now)
{

	if ((l->l_flag & LW_IDLE) != 0)
		return;

	/* rtime += now - stime */
	bintime_add(&l->l_rtime, now);
	bintime_sub(&l->l_rtime, &l->l_stime);
}

/*
 * The machine independent parts of context switch.
 *
 * Returns 1 if another LWP was actually run.
 */
int
mi_switch(lwp_t *l)
{
	struct schedstate_percpu *spc;
	struct lwp *newl;
	int retval, oldspl;
	struct cpu_info *ci;
	struct bintime bt;
	bool returning;

	KASSERT(lwp_locked(l, NULL));
	LOCKDEBUG_BARRIER(l->l_mutex, 1);

#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	binuptime(&bt);

	KDASSERT(l->l_cpu == curcpu());
	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	returning = false;
	newl = NULL;

	/*
	 * If we have been asked to switch to a specific LWP, then there
	 * is no need to inspect the run queues.  If a soft interrupt is
	 * blocking, then return to the interrupted thread without adjusting
	 * VM context or its start time: neither have been changed in order
	 * to take the interrupt.
	 */
	if (l->l_switchto != NULL) {
		if ((l->l_pflag & LP_INTR) != 0) {
			returning = true;
			softint_block(l);
			if ((l->l_flag & LW_TIMEINTR) != 0)
				updatertime(l, &bt);
		}
		newl = l->l_switchto;
		l->l_switchto = NULL;
	}
#ifndef __HAVE_FAST_SOFTINTS
	else if (ci->ci_data.cpu_softints != 0) {
		/* There are pending soft interrupts, so pick one. */
		newl = softint_picklwp();
		newl->l_stat = LSONPROC;
		newl->l_flag |= LW_RUNNING;
	}
#endif	/* !__HAVE_FAST_SOFTINTS */

	/* Count time spent in current system call */
	if (!returning) {
		SYSCALL_TIME_SLEEP(l);

		/*
		 * XXXSMP If we are using h/w performance counters,
		 * save context.
		 */
#if PERFCTRS
		if (PMC_ENABLED(l->l_proc)) {
			pmc_save_context(l->l_proc);
		}
#endif
		updatertime(l, &bt);
	}

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	mutex_spin_enter(spc->spc_mutex);
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC && l != newl) {
		KASSERT(lwp_locked(l, &spc->spc_lwplock));
		if ((l->l_flag & LW_IDLE) == 0) {
			l->l_stat = LSRUN;
			lwp_setlock(l, spc->spc_mutex);
			sched_enqueue(l, true);
		} else
			l->l_stat = LSIDL;
	}

	/*
	 * Let sched_nextlwp() select the LWP to run on the CPU next.
	 * If no LWP is runnable, select the idle LWP.
	 *
	 * Note that spc_lwplock might not necessarily be held, and
	 * that the new thread will be unlocked after the LWP lock
	 * has been set.
	 */
	if (newl == NULL) {
		newl = sched_nextlwp();
		if (newl != NULL) {
			sched_dequeue(newl);
			KASSERT(lwp_locked(newl, spc->spc_mutex));
			newl->l_stat = LSONPROC;
			newl->l_cpu = ci;
			newl->l_flag |= LW_RUNNING;
			lwp_setlock(newl, &spc->spc_lwplock);
		} else {
			newl = ci->ci_data.cpu_idlelwp;
			newl->l_stat = LSONPROC;
			newl->l_flag |= LW_RUNNING;
		}
		/*
		 * Only clear want_resched if there are no
		 * pending (slow) software interrupts.
		 */
		ci->ci_want_resched = ci->ci_data.cpu_softints;
		spc->spc_flags &= ~SPCF_SWITCHCLEAR;
		spc->spc_curpriority = lwp_eprio(newl);
	}

	/* Items that must be updated with the CPU locked. */
	if (!returning) {
		/* Update the new LWP's start time. */
		newl->l_stime = bt;

		/*
		 * ci_curlwp changes when a fast soft interrupt occurs.
		 * We use cpu_onproc to keep track of which kernel or
		 * user thread is running 'underneath' the software
		 * interrupt.  This is important for time accounting,
		 * itimers and forcing user threads to preempt (aston).
		 */
		ci->ci_data.cpu_onproc = newl;
	}

	if (l != newl) {
		struct lwp *prevlwp;

		/* Release all locks, but leave the current LWP locked */
		if (l->l_mutex == spc->spc_mutex) {
			/*
			 * Drop spc_lwplock, if the current LWP has been moved
			 * to the run queue (it is now locked by spc_mutex).
			 */
			mutex_spin_exit(&spc->spc_lwplock);
		} else {
			/*
			 * Otherwise, drop the spc_mutex, we are done with the
			 * run queues.
			 */
			mutex_spin_exit(spc->spc_mutex);
		}

		/*
		 * Mark that a context switch is going to be performed
		 * for this LWP, to protect it from being switched
		 * to on another CPU.
		 */
		KASSERT(l->l_ctxswtch == 0);
		l->l_ctxswtch = 1;
		l->l_ncsw++;
		l->l_flag &= ~LW_RUNNING;

		/*
		 * Increase the count of spin-mutexes before the release
		 * of the last lock - we must remain at IPL_SCHED during
		 * the context switch.
		 */
		oldspl = MUTEX_SPIN_OLDSPL(ci);
		ci->ci_mtx_count--;
		lwp_unlock(l);

		/* Unlocked, but for statistics only. */
		uvmexp.swtch++;

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL)
			l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;

		/*
		 * Save old VM context, unless a soft interrupt
		 * handler is blocking.
		 */
		if (!returning)
			pmap_deactivate(l);

		/*
		 * We may need to spin-wait if 'newl' is still
		 * context switching on another CPU.
		 */
		if (newl->l_ctxswtch != 0) {
			u_int count;
			count = SPINLOCK_BACKOFF_MIN;
			while (newl->l_ctxswtch)
				SPINLOCK_BACKOFF(count);
		}

		/* Switch to the new LWP. */
		prevlwp = cpu_switchto(l, newl, returning);
		ci = curcpu();

		/*
		 * Switched away - we have new curlwp.
		 * Restore VM context and IPL.
		 */
		pmap_activate(l);
		if (prevlwp != NULL) {
			/* Normalize the count of the spin-mutexes */
			ci->ci_mtx_count++;
			/* Unmark the state of context switch */
			membar_exit();
			prevlwp->l_ctxswtch = 0;
		}
		splx(oldspl);

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL)
			l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);

		retval = 1;
	} else {
		/* Nothing to do - just unlock and return. */
		mutex_spin_exit(spc->spc_mutex);
		lwp_unlock(l);
		retval = 0;
	}

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC);
	KASSERT(l->l_cpu == ci);

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_restore_context(l->l_proc);
	}
#endif
	SYSCALL_TIME_WAKEUP(l);
	LOCKDEBUG_BARRIER(NULL, 1);

	return retval;
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct cpu_info *ci;
	sigset_t *ss;

	KASSERT((l->l_flag & LW_IDLE) == 0);
	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
				ss = &l->l_sigpend.sp_set;
			else
				ss = &p->p_sigpend.sp_set;
			sigaddset(ss, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~LW_WSUSPEND;
		p->p_nrlwps++;
		cv_broadcast(&p->p_lwpcv);
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l);
		return;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_flag & LW_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Look for a CPU to run.
	 * Set the LWP runnable.
	 */
	ci = sched_takecpu(l);
	l->l_cpu = ci;
	if (l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex) {
		lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
		lwp_lock(l);
	}
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	/*
	 * If thread is swapped out - wake the swapper to bring it back in.
	 * Otherwise, enter it into a run queue.
	 */
	if (l->l_flag & LW_INMEM) {
		sched_enqueue(l, false);
		resched_cpu(l);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		uvm_kick_scheduler();
	}
}

/*
 * suspendsched:
 *
 *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_lock);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & PK_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set LW_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * try to get as many LWPs as possible to the
			 * user / kernel boundary, so that they will release
			 * any locks that they hold.
			 */
			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & LW_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_lock);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	for (CPU_INFO_FOREACH(cii, ci)) {
		spc_lock(ci);
		cpu_need_resched(ci, RESCHED_IMMED);
		spc_unlock(ci);
	}
}

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
static void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

void
resched_cpu(struct lwp *l)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 */
	ci = l->l_cpu;
	if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci, 0);
}

static void
sched_changepri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
		sched_dequeue(l);
		l->l_priority = pri;
		sched_enqueue(l, false);
	} else {
		l->l_priority = pri;
	}
	resched_cpu(l);
}

static void
sched_lendpri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
		sched_dequeue(l);
		l->l_inheritedprio = pri;
		sched_enqueue(l, false);
	} else {
		l->l_inheritedprio = pri;
	}
	resched_cpu(l);
}

struct lwp *
syncobj_noowner(wchan_t wchan)
{

	return NULL;
}


/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */
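
/*
 * Worked example (informational only): sched_pstats() runs once per
 * second and multiplies each %CPU estimate by ccpu on every pass, so
 * after 60 seconds of idleness an LWP's l_pctcpu has been scaled by
 * ccpu^60 = exp(-60/20) = exp(-3) ~= 0.0498, i.e. roughly 95% of the
 * old value has decayed away, which is where the figure above comes from.
 */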

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	(FSHIFT + 1)

/*
 * sched_pstats:
 *
 *	Update process statistics and check CPU resource allocation.
 *	Call scheduler-specific hook to eventually adjust process/LWP
 *	priorities.
 */
/* ARGSUSED */
void
sched_pstats(void *arg)
{
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int sig, clkhz;
	long runtm;

	sched_pstats_ticks++;

	mutex_enter(&proclist_lock);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		mutex_enter(&p->p_smutex);
		mutex_spin_enter(&p->p_stmutex);
		runtm = p->p_rtime.sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if ((l->l_flag & LW_IDLE) != 0)
				continue;
			lwp_lock(l);
			runtm += l->l_rtime.sec;
			l->l_swtime++;
			sched_pstats_hook(l);
			lwp_unlock(l);

			/*
			 * p_pctcpu is only for ps.
			 */
			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
			if (l->l_slptime < 1) {
				clkhz = stathz != 0 ? stathz : hz;
#if (FSHIFT >= CCPU_SHIFT)
				l->l_pctcpu += (clkhz == 100) ?
				    ((fixpt_t)l->l_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) p->p_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
				l->l_pctcpu += ((FSCALE - ccpu) *
				    (l->l_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
				l->l_cpticks = 0;
			}
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		mutex_spin_exit(&p->p_stmutex);

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}
		mutex_exit(&p->p_smutex);
		if (sig) {
			mutex_enter(&proclist_mutex);
			psignal(p, sig);
			mutex_exit(&proclist_mutex);
		}
	}
	mutex_exit(&proclist_lock);
	uvm_meter();
	cv_wakeup(&lbolt);
	callout_schedule(&sched_pstats_ch, hz);
}

void
sched_init(void)
{

	cv_init(&lbolt, "lbolt");
	callout_init(&sched_pstats_ch, 0);
	callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);
	sched_setup();
	sched_pstats(NULL);
}