/*	$NetBSD: kern_synch.c,v 1.209 2007/12/02 14:55:32 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.209 2007/12/02 14:55:32 ad Exp $");

#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/cpu.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall_stats.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/lwpctl.h>
#include <sys/atomic.h>

#include <uvm/uvm_extern.h>

callout_t sched_pstats_ch;
unsigned int sched_pstats_ticks;

kcondvar_t lbolt;			/* once a second sleep address */

static void	sched_unsleep(struct lwp *);
static void	sched_changepri(struct lwp *, pri_t);
static void	sched_lendpri(struct lwp *, pri_t);
syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,	/* sleep queue is kept sorted by priority */
	sleepq_unsleep,		/* unsleep */
	sleepq_changepri,	/* change priority */
	sleepq_lendpri,		/* lend priority */
	syncobj_noowner,	/* owner (these objects have none) */
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,	/* sleep queue is kept sorted by priority */
	sched_unsleep,		/* unsleep */
	sched_changepri,	/* change priority */
	sched_lendpri,		/* lend priority */
	syncobj_noowner,	/* owner (these objects have none) */
};

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);

	if (interlock != NULL) {
		KASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	error = sleepq_block(timo, priority & PCATCH);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}

int
mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
		return 0;
	}

	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
	mutex_exit(mtx);
	error = sleepq_block(timo, priority & PCATCH);

	if ((priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}
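
/*
 * Example usage (an illustrative sketch only; "sc", "sc_lock" and
 * "sc_busy" are hypothetical driver state, not part of this file):
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (sc->sc_busy) {
 *		error = mtsleep(&sc->sc_busy, PCATCH, "scbusy", 0,
 *		    &sc->sc_lock);
 *		if (error != 0)
 *			break;
 *	}
 *	mutex_exit(&sc->sc_lock);
 *
 * A matching wakeup(&sc->sc_busy) elsewhere makes the sleeper runnable
 * again; newer code tends to use condition variables (cv_wait(9)) instead.
 */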

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
	error = sleepq_block(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
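
/*
 * Example (an illustrative sketch; "sc_lock" is hypothetical and mstohz()
 * converts milliseconds to clock ticks): pause for roughly 10ms, allowing
 * signals to interrupt the sleep and dropping the mutex for its duration:
 *
 *	error = kpause("drvpause", true, mstohz(10), &sc->sc_lock);
 *
 * Passing NULL for the mutex and false for 'intr' gives an uninterruptible
 * pause with no lock handling.
 */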

/*
 * OBSOLETE INTERFACE
 *
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2)).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_kpriority = false;
	if (l->l_class == SCHED_OTHER) {
		/*
		 * Only for timeshared threads.  It will be reset
		 * by the scheduler in due course.
		 */
		l->l_priority = 0;
	}
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_kpriority = false;
	l->l_nivcsw++;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * Compute the amount of time during which the current lwp was running.
 *
 * - update l_rtime unless it's an idle lwp.
 */

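/*
 * Worked example: if l_rtime is { 2, 900000 } and 300000 microseconds have
 * elapsed between l_stime and *tv within the same second, the raw
 * microsecond sum is 1200000; the carry below normalizes the result to
 * { 3, 200000 }.
 */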
void
updatertime(lwp_t *l, const struct timeval *tv)
{
	long s, u;

	if ((l->l_flag & LW_IDLE) != 0)
		return;

	u = l->l_rtime.tv_usec + (tv->tv_usec - l->l_stime.tv_usec);
	s = l->l_rtime.tv_sec + (tv->tv_sec - l->l_stime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;
}

/*
 * The machine independent parts of context switch.
 *
 * Returns 1 if another LWP was actually run.
 */
int
mi_switch(lwp_t *l)
{
	struct schedstate_percpu *spc;
	struct lwp *newl;
	int retval, oldspl;
	struct cpu_info *ci;
	struct timeval tv;
	bool returning;

	KASSERT(lwp_locked(l, NULL));
	LOCKDEBUG_BARRIER(l->l_mutex, 1);

#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	microtime(&tv);

	KDASSERT(l->l_cpu == curcpu());
	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	returning = false;
	newl = NULL;

	/*
	 * If we have been asked to switch to a specific LWP, then there
	 * is no need to inspect the run queues.  If a soft interrupt is
	 * blocking, then return to the interrupted thread without adjusting
	 * VM context or its start time: neither have been changed in order
	 * to take the interrupt.
	 */
	if (l->l_switchto != NULL) {
		if ((l->l_pflag & LP_INTR) != 0) {
			returning = true;
			softint_block(l);
			if ((l->l_flag & LW_TIMEINTR) != 0)
				updatertime(l, &tv);
		}
		newl = l->l_switchto;
		l->l_switchto = NULL;
	}
#ifndef __HAVE_FAST_SOFTINTS
	else if (ci->ci_data.cpu_softints != 0) {
		/* There are pending soft interrupts, so pick one. */
		newl = softint_picklwp();
		newl->l_stat = LSONPROC;
		newl->l_flag |= LW_RUNNING;
	}
#endif	/* !__HAVE_FAST_SOFTINTS */

	/* Count time spent in current system call */
	if (!returning) {
		SYSCALL_TIME_SLEEP(l);

		/*
		 * XXXSMP If we are using h/w performance counters,
		 * save context.
		 */
#if PERFCTRS
		if (PMC_ENABLED(l->l_proc)) {
			pmc_save_context(l->l_proc);
		}
#endif
		updatertime(l, &tv);
	}

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	mutex_spin_enter(spc->spc_mutex);
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC && l != newl) {
		KASSERT(lwp_locked(l, &spc->spc_lwplock));
		if ((l->l_flag & LW_IDLE) == 0) {
			l->l_stat = LSRUN;
			lwp_setlock(l, spc->spc_mutex);
			sched_enqueue(l, true);
		} else
			l->l_stat = LSIDL;
	}

	/*
	 * Let sched_nextlwp() select the LWP to run on the CPU next.
	 * If no LWP is runnable, select the idle LWP.
	 *
	 * Note that spc_lwplock might not necessarily be held, and that
	 * the new thread will be unlocked after setting the LWP lock.
	 */
	if (newl == NULL) {
		newl = sched_nextlwp();
		if (newl != NULL) {
			sched_dequeue(newl);
			KASSERT(lwp_locked(newl, spc->spc_mutex));
			newl->l_stat = LSONPROC;
			newl->l_cpu = ci;
			newl->l_flag |= LW_RUNNING;
			lwp_setlock(newl, &spc->spc_lwplock);
		} else {
			newl = ci->ci_data.cpu_idlelwp;
			newl->l_stat = LSONPROC;
			newl->l_flag |= LW_RUNNING;
		}
		/*
		 * Only clear want_resched if there are no
		 * pending (slow) software interrupts.
		 */
		ci->ci_want_resched = ci->ci_data.cpu_softints;
		spc->spc_flags &= ~SPCF_SWITCHCLEAR;
		spc->spc_curpriority = lwp_eprio(newl);
	}

	/* Items that must be updated with the CPU locked. */
	if (!returning) {
		/* Update the new LWP's start time. */
		newl->l_stime = tv;

		/*
		 * ci_curlwp changes when a fast soft interrupt occurs.
		 * We use cpu_onproc to keep track of which kernel or
		 * user thread is running 'underneath' the software
		 * interrupt.  This is important for time accounting,
		 * itimers and forcing user threads to preempt (aston).
		 */
		ci->ci_data.cpu_onproc = newl;
	}

	if (l != newl) {
		struct lwp *prevlwp;

		/* Release all locks, but leave the current LWP locked */
		if (l->l_mutex == spc->spc_mutex) {
			/*
			 * Drop spc_lwplock, if the current LWP has been moved
			 * to the run queue (it is now locked by spc_mutex).
			 */
			mutex_spin_exit(&spc->spc_lwplock);
		} else {
			/*
			 * Otherwise, drop the spc_mutex, we are done with the
			 * run queues.
			 */
			mutex_spin_exit(spc->spc_mutex);
		}

		/*
		 * Mark that a context switch is going to be performed
		 * for this LWP, to protect it from being switched
		 * to on another CPU.
		 */
		KASSERT(l->l_ctxswtch == 0);
		l->l_ctxswtch = 1;
		l->l_ncsw++;
		l->l_flag &= ~LW_RUNNING;

		/*
		 * Increase the count of spin-mutexes before the release
		 * of the last lock - we must remain at IPL_SCHED during
		 * the context switch.
		 */
		oldspl = MUTEX_SPIN_OLDSPL(ci);
		ci->ci_mtx_count--;
		lwp_unlock(l);

		/* Unlocked, but for statistics only. */
		uvmexp.swtch++;

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL)
			l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;

		/*
		 * Save old VM context, unless a soft interrupt
		 * handler is blocking.
		 */
		if (!returning)
			pmap_deactivate(l);

		/*
		 * We may need to spin-wait if 'newl' is still
		 * context switching on another CPU.
		 */
		if (newl->l_ctxswtch != 0) {
			u_int count;
			count = SPINLOCK_BACKOFF_MIN;
			while (newl->l_ctxswtch)
				SPINLOCK_BACKOFF(count);
		}

		/* Switch to the new LWP. */
		prevlwp = cpu_switchto(l, newl, returning);
		ci = curcpu();

		/*
		 * Switched away - we have new curlwp.
		 * Restore VM context and IPL.
		 */
		pmap_activate(l);
		if (prevlwp != NULL) {
			/* Normalize the count of the spin-mutexes */
			ci->ci_mtx_count++;
			/* Unmark the state of context switch */
			membar_exit();
			prevlwp->l_ctxswtch = 0;
		}
		splx(oldspl);

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL)
			l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);

		retval = 1;
	} else {
		/* Nothing to do - just unlock and return. */
		mutex_spin_exit(spc->spc_mutex);
		lwp_unlock(l);
		retval = 0;
	}

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC);
	KASSERT(l->l_cpu == ci);

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_restore_context(l->l_proc);
	}
#endif
	SYSCALL_TIME_WAKEUP(l);
	LOCKDEBUG_BARRIER(NULL, 1);

	return retval;
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct cpu_info *ci;
	sigset_t *ss;

	KASSERT((l->l_flag & LW_IDLE) == 0);
	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
				ss = &l->l_sigpend.sp_set;
			else
				ss = &p->p_sigpend.sp_set;
			sigaddset(ss, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~LW_WSUSPEND;
		p->p_nrlwps++;
		cv_broadcast(&p->p_lwpcv);
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l);
		return;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_flag & LW_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Look for a CPU to run.
	 * Set the LWP runnable.
	 */
	ci = sched_takecpu(l);
	l->l_cpu = ci;
	if (l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex) {
		lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
		lwp_lock(l);
	}
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	/*
	 * If thread is swapped out - wake the swapper to bring it back in.
	 * Otherwise, enter it into a run queue.
	 */
	if (l->l_flag & LW_INMEM) {
		sched_enqueue(l, false);
		resched_cpu(l);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		uvm_kick_scheduler();
	}
}

/*
 * suspendsched:
 *
 *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_lock);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & PK_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set LW_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user/kernel
			 * boundary, so that they will release any locks
			 * that they hold.
			 */
			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & LW_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_lock);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	for (CPU_INFO_FOREACH(cii, ci)) {
		spc_lock(ci);
		cpu_need_resched(ci, RESCHED_IMMED);
		spc_unlock(ci);
	}
}

/*
 * sched_kpri:
 *
 *	Scale a priority level to a kernel priority level, usually
 *	for an LWP that is about to sleep.
 */
pri_t
sched_kpri(struct lwp *l)
{
	pri_t pri;

#ifndef __HAVE_FAST_SOFTINTS
	/*
	 * Hack: if a user thread is being used to run a soft
	 * interrupt, we need to boost the priority here.
	 */
	if ((l->l_pflag & LP_INTR) != 0 && l->l_priority < PRI_KERNEL_RT)
		return softint_kpri(l);
#endif

	/*
	 * Scale user priorities (0 -> 63) up to kernel priorities
	 * in the range (64 -> 95).  This makes assumptions about
	 * the priority space and so should be kept in sync with
	 * param.h.
	 */
	if ((pri = l->l_priority) >= PRI_KERNEL)
		return pri;
	return (pri >> 1) + PRI_KERNEL;
}
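
/*
 * For example, given the layout described above (user priorities 0 -> 63,
 * kernel priorities starting at 64): a user priority of 0 maps to 64,
 * 32 maps to 80 and 63 maps to 95.
 */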

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
static void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

void
resched_cpu(struct lwp *l)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 */
	ci = l->l_cpu;
	if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci, 0);
}

static void
sched_changepri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
		sched_dequeue(l);
		l->l_priority = pri;
		sched_enqueue(l, false);
	} else {
		l->l_priority = pri;
	}
	resched_cpu(l);
}

static void
sched_lendpri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
		sched_dequeue(l);
		l->l_inheritedprio = pri;
		sched_enqueue(l, false);
	} else {
		l->l_inheritedprio = pri;
	}
	resched_cpu(l);
}

struct lwp *
syncobj_noowner(wchan_t wchan)
{

	return NULL;
}

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	(FSHIFT + 1)
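
/*
 * Where the "95% in 60 seconds" figure above comes from: sched_pstats()
 * runs once per second (see the callout_schedule() call below), and each
 * pass multiplies l_pctcpu by ccpu = exp(-1/20).  After 60 passes the old
 * value has been scaled by exp(-60/20) = exp(-3) ~= 0.05, i.e. roughly
 * 95% of it has decayed away.
 */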

/*
 * sched_pstats:
 *
 *	Update process statistics and check CPU resource allocation.
 *	Call scheduler-specific hook to eventually adjust process/LWP
 *	priorities.
 */
/* ARGSUSED */
void
sched_pstats(void *arg)
{
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int sig, clkhz;
	long runtm;

	sched_pstats_ticks++;

	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		mutex_enter(&p->p_smutex);
		mutex_spin_enter(&p->p_stmutex);
		runtm = p->p_rtime.tv_sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if ((l->l_flag & LW_IDLE) != 0)
				continue;
			lwp_lock(l);
			runtm += l->l_rtime.tv_sec;
			l->l_swtime++;
			sched_pstats_hook(l);
			lwp_unlock(l);

			/*
			 * p_pctcpu is only for ps.
			 */
			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
			if (l->l_slptime < 1) {
				clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
				l->l_pctcpu += (clkhz == 100) ?
				    ((fixpt_t)l->l_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t)l->l_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
				l->l_pctcpu += ((FSCALE - ccpu) *
				    (l->l_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
				l->l_cpticks = 0;
			}
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		mutex_spin_exit(&p->p_stmutex);

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}
		mutex_exit(&p->p_smutex);
		if (sig) {
			psignal(p, sig);
		}
	}
	mutex_exit(&proclist_mutex);
	uvm_meter();
	cv_wakeup(&lbolt);
	callout_schedule(&sched_pstats_ch, hz);
}

void
sched_init(void)
{

	cv_init(&lbolt, "lbolt");
	callout_init(&sched_pstats_ch, 0);
	callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);
	sched_setup();
	sched_pstats(NULL);
}