     1 /*	$NetBSD: kern_synch.c,v 1.301 2012/04/21 22:38:25 rmind Exp $	*/
2
3 /*-
4 * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009
5 * The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
11 * Daniel Sieger.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 /*-
36 * Copyright (c) 1982, 1986, 1990, 1991, 1993
37 * The Regents of the University of California. All rights reserved.
38 * (c) UNIX System Laboratories, Inc.
39 * All or some portions of this file are derived from material licensed
40 * to the University of California by American Telephone and Telegraph
41 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
42 * the permission of UNIX System Laboratories, Inc.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 * 3. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
67 *
68 * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95
69 */
70
71 #include <sys/cdefs.h>
72 __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.301 2012/04/21 22:38:25 rmind Exp $");
73
74 #include "opt_kstack.h"
75 #include "opt_perfctrs.h"
76 #include "opt_dtrace.h"
77
78 #define __MUTEX_PRIVATE
79
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc.h>
83 #include <sys/kernel.h>
84 #if defined(PERFCTRS)
85 #include <sys/pmc.h>
86 #endif
87 #include <sys/cpu.h>
88 #include <sys/pserialize.h>
89 #include <sys/resourcevar.h>
90 #include <sys/sched.h>
91 #include <sys/syscall_stats.h>
92 #include <sys/sleepq.h>
93 #include <sys/lockdebug.h>
94 #include <sys/evcnt.h>
95 #include <sys/intr.h>
96 #include <sys/lwpctl.h>
97 #include <sys/atomic.h>
98 #include <sys/simplelock.h>
99 #include <sys/syslog.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <dev/lockstat.h>
104
105 #include <sys/dtrace_bsd.h>
106 int dtrace_vtime_active=0;
107 dtrace_vtime_switch_func_t dtrace_vtime_switch_func;
108
109 static void sched_unsleep(struct lwp *, bool);
110 static void sched_changepri(struct lwp *, pri_t);
111 static void sched_lendpri(struct lwp *, pri_t);
112 static void resched_cpu(struct lwp *);
113
114 syncobj_t sleep_syncobj = {
115 SOBJ_SLEEPQ_SORTED,
116 sleepq_unsleep,
117 sleepq_changepri,
118 sleepq_lendpri,
119 syncobj_noowner,
120 };
121
122 syncobj_t sched_syncobj = {
123 SOBJ_SLEEPQ_SORTED,
124 sched_unsleep,
125 sched_changepri,
126 sched_lendpri,
127 syncobj_noowner,
128 };
129
130 /* "Lightning bolt": once a second sleep address. */
131 kcondvar_t lbolt __cacheline_aligned;
132
133 u_int sched_pstats_ticks __cacheline_aligned;
134
135 /* Preemption event counters. */
136 static struct evcnt kpreempt_ev_crit __cacheline_aligned;
137 static struct evcnt kpreempt_ev_klock __cacheline_aligned;
138 static struct evcnt kpreempt_ev_immed __cacheline_aligned;
139
140 /*
141 * During autoconfiguration or after a panic, a sleep will simply lower the
142 * priority briefly to allow interrupts, then return. The priority to be
143 * used (safepri) is machine-dependent, thus this value is initialized and
144 * maintained in the machine-dependent layers. This priority will typically
145 * be 0, or the lowest priority that is safe for use on the interrupt stack;
146 * it can be made higher to block network software interrupts after panics.
147 */
148 #ifdef IPL_SAFEPRI
149 int safepri = IPL_SAFEPRI;
150 #else
151 int safepri;
152 #endif
153
154 void
155 synch_init(void)
156 {
157
158 cv_init(&lbolt, "lbolt");
159
160 evcnt_attach_dynamic(&kpreempt_ev_crit, EVCNT_TYPE_MISC, NULL,
161 "kpreempt", "defer: critical section");
162 evcnt_attach_dynamic(&kpreempt_ev_klock, EVCNT_TYPE_MISC, NULL,
163 "kpreempt", "defer: kernel_lock");
164 evcnt_attach_dynamic(&kpreempt_ev_immed, EVCNT_TYPE_MISC, NULL,
165 "kpreempt", "immediate");
166 }
167
168 /*
169 * OBSOLETE INTERFACE
170 *
171 * General sleep call. Suspends the current LWP until a wakeup is
172 * performed on the specified identifier. The LWP will then be made
173 * runnable with the specified priority. Sleeps at most timo/hz seconds (0
   174  * means no timeout). If pri includes the PCATCH flag, signals are checked
   175  * before and after sleeping; otherwise signals are not checked. Returns 0 if
   176  * awakened, or EWOULDBLOCK if the timeout expires. If PCATCH is set and a
   177  * signal needs to be delivered, ERESTART is returned if the current system
   178  * call should be restarted if possible, and EINTR is returned if the system
   179  * call should instead be interrupted by the signal.
180 */
181 int
182 tsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo)
183 {
184 struct lwp *l = curlwp;
185 sleepq_t *sq;
186 kmutex_t *mp;
187
188 KASSERT((l->l_pflag & LP_INTR) == 0);
189 KASSERT(ident != &lbolt);
190
191 if (sleepq_dontsleep(l)) {
192 (void)sleepq_abort(NULL, 0);
193 return 0;
194 }
195
196 l->l_kpriority = true;
197 sq = sleeptab_lookup(&sleeptab, ident, &mp);
198 sleepq_enter(sq, l, mp);
199 sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
200 return sleepq_block(timo, priority & PCATCH);
201 }
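/*
 * Illustrative sketch of the classic tsleep()/wakeup() handshake; the
 * softc pointer and the SC_READY flag are hypothetical names, not part
 * of this interface:
 *
 *	while ((sc->sc_flags & SC_READY) == 0) {
 *		error = tsleep(&sc->sc_flags, PZERO | PCATCH, "scwait", hz);
 *		if (error != 0 && error != EWOULDBLOCK)
 *			return error;
 *	}
 *
 * and on the producer side, once the condition becomes true:
 *
 *	sc->sc_flags |= SC_READY;
 *	wakeup(&sc->sc_flags);
 */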
202
203 int
204 mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
205 kmutex_t *mtx)
206 {
207 struct lwp *l = curlwp;
208 sleepq_t *sq;
209 kmutex_t *mp;
210 int error;
211
212 KASSERT((l->l_pflag & LP_INTR) == 0);
213 KASSERT(ident != &lbolt);
214
215 if (sleepq_dontsleep(l)) {
216 (void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
217 return 0;
218 }
219
220 l->l_kpriority = true;
221 sq = sleeptab_lookup(&sleeptab, ident, &mp);
222 sleepq_enter(sq, l, mp);
223 sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
224 mutex_exit(mtx);
225 error = sleepq_block(timo, priority & PCATCH);
226
227 if ((priority & PNORELOCK) == 0)
228 mutex_enter(mtx);
229
230 return error;
231 }
232
233 /*
234 * General sleep call for situations where a wake-up is not expected.
235 */
236 int
237 kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
238 {
239 struct lwp *l = curlwp;
240 kmutex_t *mp;
241 sleepq_t *sq;
242 int error;
243
244 KASSERT(!(timo == 0 && intr == false));
245
246 if (sleepq_dontsleep(l))
247 return sleepq_abort(NULL, 0);
248
249 if (mtx != NULL)
250 mutex_exit(mtx);
251 l->l_kpriority = true;
252 sq = sleeptab_lookup(&sleeptab, l, &mp);
253 sleepq_enter(sq, l, mp);
254 sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
255 error = sleepq_block(timo, intr);
256 if (mtx != NULL)
257 mutex_enter(mtx);
258
259 return error;
260 }
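/*
 * Illustrative sketch only: an uninterruptible pause of roughly 100ms
 * with no lock held across the sleep ("pause" is an arbitrary wmesg):
 *
 *	(void)kpause("pause", false, mstohz(100), NULL);
 */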
261
262 /*
263 * OBSOLETE INTERFACE
264 *
265 * Make all LWPs sleeping on the specified identifier runnable.
266 */
267 void
268 wakeup(wchan_t ident)
269 {
270 sleepq_t *sq;
271 kmutex_t *mp;
272
273 if (__predict_false(cold))
274 return;
275
276 sq = sleeptab_lookup(&sleeptab, ident, &mp);
277 sleepq_wake(sq, ident, (u_int)-1, mp);
278 }
279
280 /*
281 * General yield call. Puts the current LWP back on its run queue and
282 * performs a voluntary context switch. Should only be called when the
   283  * current LWP explicitly requests it (e.g. sched_yield(2)).
284 */
285 void
286 yield(void)
287 {
288 struct lwp *l = curlwp;
289
290 KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
291 lwp_lock(l);
292 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
293 KASSERT(l->l_stat == LSONPROC);
294 l->l_kpriority = false;
295 (void)mi_switch(l);
296 KERNEL_LOCK(l->l_biglocks, l);
297 }
298
299 /*
300 * General preemption call. Puts the current LWP back on its run queue
301 * and performs an involuntary context switch.
302 */
303 void
304 preempt(void)
305 {
306 struct lwp *l = curlwp;
307
308 KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
309 lwp_lock(l);
310 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
311 KASSERT(l->l_stat == LSONPROC);
312 l->l_kpriority = false;
313 l->l_nivcsw++;
314 (void)mi_switch(l);
315 KERNEL_LOCK(l->l_biglocks, l);
316 }
317
318 /*
319 * Handle a request made by another agent to preempt the current LWP
320 * in-kernel. Usually called when l_dopreempt may be non-zero.
321 *
322 * Character addresses for lockstat only.
323 */
324 static char in_critical_section;
325 static char kernel_lock_held;
326 static char is_softint;
327 static char cpu_kpreempt_enter_fail;
328
329 bool
330 kpreempt(uintptr_t where)
331 {
332 uintptr_t failed;
333 lwp_t *l;
334 int s, dop, lsflag;
335
336 l = curlwp;
337 failed = 0;
338 while ((dop = l->l_dopreempt) != 0) {
339 if (l->l_stat != LSONPROC) {
340 /*
341 * About to block (or die), let it happen.
342 * Doesn't really count as "preemption has
343 * been blocked", since we're going to
344 * context switch.
345 */
346 l->l_dopreempt = 0;
347 return true;
348 }
349 if (__predict_false((l->l_flag & LW_IDLE) != 0)) {
350 /* Can't preempt idle loop, don't count as failure. */
351 l->l_dopreempt = 0;
352 return true;
353 }
354 if (__predict_false(l->l_nopreempt != 0)) {
355 /* LWP holds preemption disabled, explicitly. */
356 if ((dop & DOPREEMPT_COUNTED) == 0) {
357 kpreempt_ev_crit.ev_count++;
358 }
359 failed = (uintptr_t)&in_critical_section;
360 break;
361 }
362 if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
363 /* Can't preempt soft interrupts yet. */
364 l->l_dopreempt = 0;
365 failed = (uintptr_t)&is_softint;
366 break;
367 }
368 s = splsched();
369 if (__predict_false(l->l_blcnt != 0 ||
370 curcpu()->ci_biglock_wanted != NULL)) {
371 /* Hold or want kernel_lock, code is not MT safe. */
372 splx(s);
373 if ((dop & DOPREEMPT_COUNTED) == 0) {
374 kpreempt_ev_klock.ev_count++;
375 }
376 failed = (uintptr_t)&kernel_lock_held;
377 break;
378 }
379 if (__predict_false(!cpu_kpreempt_enter(where, s))) {
380 /*
381 * It may be that the IPL is too high.
   382 			 * cpu_kpreempt_enter() can schedule an
383 * interrupt to retry later.
384 */
385 splx(s);
386 failed = (uintptr_t)&cpu_kpreempt_enter_fail;
387 break;
388 }
389 /* Do it! */
390 if (__predict_true((dop & DOPREEMPT_COUNTED) == 0)) {
391 kpreempt_ev_immed.ev_count++;
392 }
393 lwp_lock(l);
394 mi_switch(l);
395 l->l_nopreempt++;
396 splx(s);
397
398 /* Take care of any MD cleanup. */
399 cpu_kpreempt_exit(where);
400 l->l_nopreempt--;
401 }
402
403 if (__predict_true(!failed)) {
404 return false;
405 }
406
407 /* Record preemption failure for reporting via lockstat. */
408 atomic_or_uint(&l->l_dopreempt, DOPREEMPT_COUNTED);
409 lsflag = 0;
410 LOCKSTAT_ENTER(lsflag);
411 if (__predict_false(lsflag)) {
412 if (where == 0) {
413 where = (uintptr_t)__builtin_return_address(0);
414 }
415 /* Preemption is on, might recurse, so make it atomic. */
416 if (atomic_cas_ptr_ni((void *)&l->l_pfailaddr, NULL,
417 (void *)where) == NULL) {
418 LOCKSTAT_START_TIMER(lsflag, l->l_pfailtime);
419 l->l_pfaillock = failed;
420 }
421 }
422 LOCKSTAT_EXIT(lsflag);
423 return true;
424 }
425
426 /*
427 * Return true if preemption is explicitly disabled.
428 */
429 bool
430 kpreempt_disabled(void)
431 {
432 const lwp_t *l = curlwp;
433
434 return l->l_nopreempt != 0 || l->l_stat == LSZOMB ||
435 (l->l_flag & LW_IDLE) != 0 || cpu_kpreempt_disabled();
436 }
437
438 /*
439 * Disable kernel preemption.
440 */
441 void
442 kpreempt_disable(void)
443 {
444
445 KPREEMPT_DISABLE(curlwp);
446 }
447
448 /*
449 * Reenable kernel preemption.
450 */
451 void
452 kpreempt_enable(void)
453 {
454
455 KPREEMPT_ENABLE(curlwp);
456 }
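/*
 * Illustrative sketch only: the usual bracketing pattern for a short
 * section that must stay on the current CPU:
 *
 *	kpreempt_disable();
 *	... access curcpu()-private state ...
 *	kpreempt_enable();
 */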
457
458 /*
459 * Compute the amount of time during which the current lwp was running.
460 *
461 * - update l_rtime unless it's an idle lwp.
462 */
463
464 void
465 updatertime(lwp_t *l, const struct bintime *now)
466 {
467
468 if (__predict_false(l->l_flag & LW_IDLE))
469 return;
470
471 /* rtime += now - stime */
472 bintime_add(&l->l_rtime, now);
473 bintime_sub(&l->l_rtime, &l->l_stime);
474 }
475
476 /*
   477  * Select the next LWP for the current CPU to run.
478 */
479 static inline lwp_t *
480 nextlwp(struct cpu_info *ci, struct schedstate_percpu *spc)
481 {
482 lwp_t *newl;
483
484 /*
   485 	 * Let sched_nextlwp() select the LWP to run on the CPU next.
486 * If no LWP is runnable, select the idle LWP.
487 *
   488 	 * Note that spc_lwplock might not necessarily be held, and that the
   489 	 * new thread will be unlocked after its LWP lock is set.
490 */
491 newl = sched_nextlwp();
492 if (newl != NULL) {
493 sched_dequeue(newl);
494 KASSERT(lwp_locked(newl, spc->spc_mutex));
495 KASSERT(newl->l_cpu == ci);
496 newl->l_stat = LSONPROC;
497 newl->l_pflag |= LP_RUNNING;
498 lwp_setlock(newl, spc->spc_lwplock);
499 } else {
500 newl = ci->ci_data.cpu_idlelwp;
501 newl->l_stat = LSONPROC;
502 newl->l_pflag |= LP_RUNNING;
503 }
504
505 /*
506 * Only clear want_resched if there are no pending (slow)
507 * software interrupts.
508 */
509 ci->ci_want_resched = ci->ci_data.cpu_softints;
510 spc->spc_flags &= ~SPCF_SWITCHCLEAR;
511 spc->spc_curpriority = lwp_eprio(newl);
512
513 return newl;
514 }
515
516 /*
   517  * The machine-independent parts of a context switch.
518 *
519 * Returns 1 if another LWP was actually run.
520 */
521 int
522 mi_switch(lwp_t *l)
523 {
524 struct cpu_info *ci;
525 struct schedstate_percpu *spc;
526 struct lwp *newl;
527 int retval, oldspl;
528 struct bintime bt;
529 bool returning;
530
531 KASSERT(lwp_locked(l, NULL));
532 KASSERT(kpreempt_disabled());
533 LOCKDEBUG_BARRIER(l->l_mutex, 1);
534
535 kstack_check_magic(l);
536
537 binuptime(&bt);
538
539 KASSERT((l->l_pflag & LP_RUNNING) != 0);
540 KASSERT(l->l_cpu == curcpu());
541 ci = l->l_cpu;
542 spc = &ci->ci_schedstate;
543 returning = false;
544 newl = NULL;
545
546 /*
547 * If we have been asked to switch to a specific LWP, then there
548 * is no need to inspect the run queues. If a soft interrupt is
549 * blocking, then return to the interrupted thread without adjusting
   550 	 * VM context or its start time: neither has been changed in order
551 * to take the interrupt.
552 */
553 if (l->l_switchto != NULL) {
554 if ((l->l_pflag & LP_INTR) != 0) {
555 returning = true;
556 softint_block(l);
557 if ((l->l_pflag & LP_TIMEINTR) != 0)
558 updatertime(l, &bt);
559 }
560 newl = l->l_switchto;
561 l->l_switchto = NULL;
562 }
563 #ifndef __HAVE_FAST_SOFTINTS
564 else if (ci->ci_data.cpu_softints != 0) {
565 /* There are pending soft interrupts, so pick one. */
566 newl = softint_picklwp();
567 newl->l_stat = LSONPROC;
568 newl->l_pflag |= LP_RUNNING;
569 }
570 #endif /* !__HAVE_FAST_SOFTINTS */
571
572 /* Count time spent in current system call */
573 if (!returning) {
574 SYSCALL_TIME_SLEEP(l);
575
576 /*
577 * XXXSMP If we are using h/w performance counters,
578 * save context.
579 */
580 #if PERFCTRS
581 if (PMC_ENABLED(l->l_proc)) {
582 pmc_save_context(l->l_proc);
583 }
584 #endif
585 updatertime(l, &bt);
586 }
587
588 /* Lock the runqueue */
589 KASSERT(l->l_stat != LSRUN);
590 mutex_spin_enter(spc->spc_mutex);
591
592 /*
   593 	 * If we are still on the CPU and have gotten this far, then we must yield.
594 */
595 if (l->l_stat == LSONPROC && l != newl) {
596 KASSERT(lwp_locked(l, spc->spc_lwplock));
597 if ((l->l_flag & LW_IDLE) == 0) {
598 l->l_stat = LSRUN;
599 lwp_setlock(l, spc->spc_mutex);
600 sched_enqueue(l, true);
601 /*
602 * Handle migration. Note that "migrating LWP" may
   603 			 * be reset here if an interrupt or preemption happens
   604 			 * early in the idle LWP.
605 */
606 if (l->l_target_cpu != NULL) {
607 KASSERT((l->l_pflag & LP_INTR) == 0);
608 spc->spc_migrating = l;
609 }
610 } else
611 l->l_stat = LSIDL;
612 }
613
614 /* Pick new LWP to run. */
615 if (newl == NULL) {
616 newl = nextlwp(ci, spc);
617 }
618
619 /* Items that must be updated with the CPU locked. */
620 if (!returning) {
621 /* Update the new LWP's start time. */
622 newl->l_stime = bt;
623
624 /*
625 * ci_curlwp changes when a fast soft interrupt occurs.
626 * We use cpu_onproc to keep track of which kernel or
627 * user thread is running 'underneath' the software
628 * interrupt. This is important for time accounting,
629 * itimers and forcing user threads to preempt (aston).
630 */
631 ci->ci_data.cpu_onproc = newl;
632 }
633
634 /*
635 * Preemption related tasks. Must be done with the current
636 * CPU locked.
637 */
638 cpu_did_resched(l);
639 l->l_dopreempt = 0;
640 if (__predict_false(l->l_pfailaddr != 0)) {
641 LOCKSTAT_FLAG(lsflag);
642 LOCKSTAT_ENTER(lsflag);
643 LOCKSTAT_STOP_TIMER(lsflag, l->l_pfailtime);
644 LOCKSTAT_EVENT_RA(lsflag, l->l_pfaillock, LB_NOPREEMPT|LB_SPIN,
645 1, l->l_pfailtime, l->l_pfailaddr);
646 LOCKSTAT_EXIT(lsflag);
647 l->l_pfailtime = 0;
648 l->l_pfaillock = 0;
649 l->l_pfailaddr = 0;
650 }
651
652 if (l != newl) {
653 struct lwp *prevlwp;
654
655 /* Release all locks, but leave the current LWP locked */
656 if (l->l_mutex == spc->spc_mutex) {
657 /*
658 * Drop spc_lwplock, if the current LWP has been moved
659 * to the run queue (it is now locked by spc_mutex).
660 */
661 mutex_spin_exit(spc->spc_lwplock);
662 } else {
663 /*
664 * Otherwise, drop the spc_mutex, we are done with the
665 * run queues.
666 */
667 mutex_spin_exit(spc->spc_mutex);
668 }
669
670 /*
   671 		 * Mark that a context switch is going to be performed
672 * for this LWP, to protect it from being switched
673 * to on another CPU.
674 */
675 KASSERT(l->l_ctxswtch == 0);
676 l->l_ctxswtch = 1;
677 l->l_ncsw++;
678 KASSERT((l->l_pflag & LP_RUNNING) != 0);
679 l->l_pflag &= ~LP_RUNNING;
680
681 /*
682 * Increase the count of spin-mutexes before the release
683 * of the last lock - we must remain at IPL_SCHED during
684 * the context switch.
685 */
686 KASSERTMSG(ci->ci_mtx_count == -1,
687 "%s: cpu%u: ci_mtx_count (%d) != -1 "
688 "(block with spin-mutex held)",
689 __func__, cpu_index(ci), ci->ci_mtx_count);
690 oldspl = MUTEX_SPIN_OLDSPL(ci);
691 ci->ci_mtx_count--;
692 lwp_unlock(l);
693
694 /* Count the context switch on this CPU. */
695 ci->ci_data.cpu_nswtch++;
696
697 /* Update status for lwpctl, if present. */
698 if (l->l_lwpctl != NULL)
699 l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;
700
701 /*
702 * Save old VM context, unless a soft interrupt
703 * handler is blocking.
704 */
705 if (!returning)
706 pmap_deactivate(l);
707
708 /*
709 * We may need to spin-wait if 'newl' is still
710 * context switching on another CPU.
711 */
712 if (__predict_false(newl->l_ctxswtch != 0)) {
713 u_int count;
714 count = SPINLOCK_BACKOFF_MIN;
715 while (newl->l_ctxswtch)
716 SPINLOCK_BACKOFF(count);
717 }
718
719 /*
720 * If DTrace has set the active vtime enum to anything
721 * other than INACTIVE (0), then it should have set the
722 * function to call.
723 */
724 if (__predict_false(dtrace_vtime_active)) {
725 (*dtrace_vtime_switch_func)(newl);
726 }
727
   728 		/* Switch to the new LWP. */
729 prevlwp = cpu_switchto(l, newl, returning);
730 ci = curcpu();
731
732 /*
   733 		 * Switched away - we have a new curlwp.
734 * Restore VM context and IPL.
735 */
736 pmap_activate(l);
737 uvm_emap_switch(l);
738 pcu_switchpoint(l);
739
740 if (prevlwp != NULL) {
741 /* Normalize the count of the spin-mutexes */
742 ci->ci_mtx_count++;
743 /* Unmark the state of context switch */
744 membar_exit();
745 prevlwp->l_ctxswtch = 0;
746 }
747
748 /* Update status for lwpctl, if present. */
749 if (l->l_lwpctl != NULL) {
750 l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);
751 l->l_lwpctl->lc_pctr++;
752 }
753
754 /* Note trip through cpu_switchto(). */
755 pserialize_switchpoint();
756
757 KASSERT(l->l_cpu == ci);
758 splx(oldspl);
759 /*
   760 		 * Note that, unless the caller disabled preemption,
761 * we can be preempted at any time after the above splx() call.
762 */
763 retval = 1;
764 } else {
765 /* Nothing to do - just unlock and return. */
766 mutex_spin_exit(spc->spc_mutex);
767 lwp_unlock(l);
768 retval = 0;
769 }
770
771 KASSERT(l == curlwp);
772 KASSERT(l->l_stat == LSONPROC);
773
774 /*
775 * XXXSMP If we are using h/w performance counters, restore context.
776 * XXXSMP preemption problem.
777 */
778 #if PERFCTRS
779 if (PMC_ENABLED(l->l_proc)) {
780 pmc_restore_context(l->l_proc);
781 }
782 #endif
783 SYSCALL_TIME_WAKEUP(l);
784 LOCKDEBUG_BARRIER(NULL, 1);
785
786 return retval;
787 }
788
789 /*
   790  * The machine-independent parts of a context switch to oblivion.
791 * Does not return. Call with the LWP unlocked.
792 */
793 void
794 lwp_exit_switchaway(lwp_t *l)
795 {
796 struct cpu_info *ci;
797 struct lwp *newl;
798 struct bintime bt;
799
800 ci = l->l_cpu;
801
802 KASSERT(kpreempt_disabled());
803 KASSERT(l->l_stat == LSZOMB || l->l_stat == LSIDL);
804 KASSERT(ci == curcpu());
805 LOCKDEBUG_BARRIER(NULL, 0);
806
807 kstack_check_magic(l);
808
809 /* Count time spent in current system call */
810 SYSCALL_TIME_SLEEP(l);
811 binuptime(&bt);
812 updatertime(l, &bt);
813
814 /* Must stay at IPL_SCHED even after releasing run queue lock. */
815 (void)splsched();
816
817 /*
   818 	 * Let sched_nextlwp() select the LWP to run on the CPU next.
819 * If no LWP is runnable, select the idle LWP.
820 *
   821 	 * Note that spc_lwplock might not necessarily be held, and that the
   822 	 * new thread will be unlocked after its LWP lock is set.
823 */
824 spc_lock(ci);
825 #ifndef __HAVE_FAST_SOFTINTS
826 if (ci->ci_data.cpu_softints != 0) {
827 /* There are pending soft interrupts, so pick one. */
828 newl = softint_picklwp();
829 newl->l_stat = LSONPROC;
830 newl->l_pflag |= LP_RUNNING;
831 } else
832 #endif /* !__HAVE_FAST_SOFTINTS */
833 {
834 newl = nextlwp(ci, &ci->ci_schedstate);
835 }
836
837 /* Update the new LWP's start time. */
838 newl->l_stime = bt;
839 l->l_pflag &= ~LP_RUNNING;
840
841 /*
842 * ci_curlwp changes when a fast soft interrupt occurs.
843 * We use cpu_onproc to keep track of which kernel or
844 * user thread is running 'underneath' the software
845 * interrupt. This is important for time accounting,
846 * itimers and forcing user threads to preempt (aston).
847 */
848 ci->ci_data.cpu_onproc = newl;
849
850 /*
851 * Preemption related tasks. Must be done with the current
852 * CPU locked.
853 */
854 cpu_did_resched(l);
855
856 /* Unlock the run queue. */
857 spc_unlock(ci);
858
859 /* Count the context switch on this CPU. */
860 ci->ci_data.cpu_nswtch++;
861
862 /* Update status for lwpctl, if present. */
863 if (l->l_lwpctl != NULL)
864 l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
865
866 /*
867 * We may need to spin-wait if 'newl' is still
868 * context switching on another CPU.
869 */
870 if (__predict_false(newl->l_ctxswtch != 0)) {
871 u_int count;
872 count = SPINLOCK_BACKOFF_MIN;
873 while (newl->l_ctxswtch)
874 SPINLOCK_BACKOFF(count);
875 }
876
877 /*
878 * If DTrace has set the active vtime enum to anything
879 * other than INACTIVE (0), then it should have set the
880 * function to call.
881 */
882 if (__predict_false(dtrace_vtime_active)) {
883 (*dtrace_vtime_switch_func)(newl);
884 }
885
   886 	/* Switch to the new LWP. */
887 (void)cpu_switchto(NULL, newl, false);
888
889 for (;;) continue; /* XXX: convince gcc about "noreturn" */
890 /* NOTREACHED */
891 }
892
893 /*
894 * setrunnable: change LWP state to be runnable, placing it on the run queue.
895 *
896 * Call with the process and LWP locked. Will return with the LWP unlocked.
897 */
898 void
899 setrunnable(struct lwp *l)
900 {
901 struct proc *p = l->l_proc;
902 struct cpu_info *ci;
903
904 KASSERT((l->l_flag & LW_IDLE) == 0);
905 KASSERT(mutex_owned(p->p_lock));
906 KASSERT(lwp_locked(l, NULL));
907 KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);
908
909 switch (l->l_stat) {
910 case LSSTOP:
911 /*
912 * If we're being traced (possibly because someone attached us
913 * while we were stopped), check for a signal from the debugger.
914 */
915 if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0)
916 signotify(l);
917 p->p_nrlwps++;
918 break;
919 case LSSUSPENDED:
920 l->l_flag &= ~LW_WSUSPEND;
921 p->p_nrlwps++;
922 cv_broadcast(&p->p_lwpcv);
923 break;
924 case LSSLEEP:
925 KASSERT(l->l_wchan != NULL);
926 break;
927 default:
928 panic("setrunnable: lwp %p state was %d", l, l->l_stat);
929 }
930
931 /*
932 * If the LWP was sleeping, start it again.
933 */
934 if (l->l_wchan != NULL) {
935 l->l_stat = LSSLEEP;
936 /* lwp_unsleep() will release the lock. */
937 lwp_unsleep(l, true);
938 return;
939 }
940
941 /*
942 * If the LWP is still on the CPU, mark it as LSONPROC. It may be
943 * about to call mi_switch(), in which case it will yield.
944 */
945 if ((l->l_pflag & LP_RUNNING) != 0) {
946 l->l_stat = LSONPROC;
947 l->l_slptime = 0;
948 lwp_unlock(l);
949 return;
950 }
951
952 /*
953 * Look for a CPU to run.
954 * Set the LWP runnable.
955 */
956 ci = sched_takecpu(l);
957 l->l_cpu = ci;
958 spc_lock(ci);
959 lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
960 sched_setrunnable(l);
961 l->l_stat = LSRUN;
962 l->l_slptime = 0;
963
964 sched_enqueue(l, false);
965 resched_cpu(l);
966 lwp_unlock(l);
967 }
968
969 /*
970 * suspendsched:
971 *
972 * Convert all non-LW_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
973 */
974 void
975 suspendsched(void)
976 {
977 CPU_INFO_ITERATOR cii;
978 struct cpu_info *ci;
979 struct lwp *l;
980 struct proc *p;
981
982 /*
983 * We do this by process in order not to violate the locking rules.
984 */
985 mutex_enter(proc_lock);
986 PROCLIST_FOREACH(p, &allproc) {
987 mutex_enter(p->p_lock);
988 if ((p->p_flag & PK_SYSTEM) != 0) {
989 mutex_exit(p->p_lock);
990 continue;
991 }
992
993 p->p_stat = SSTOP;
994
995 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
996 if (l == curlwp)
997 continue;
998
999 lwp_lock(l);
1000
1001 /*
  1002 			 * Set LW_WREBOOT so that the LWP will suspend itself
  1003 			 * when it tries to return to user mode. We want to
  1004 			 * get as many LWPs as possible to the
  1005 			 * user/kernel boundary, so that they will
  1006 			 * release any locks that they hold.
1007 */
1008 l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);
1009
1010 if (l->l_stat == LSSLEEP &&
1011 (l->l_flag & LW_SINTR) != 0) {
1012 /* setrunnable() will release the lock. */
1013 setrunnable(l);
1014 continue;
1015 }
1016
1017 lwp_unlock(l);
1018 }
1019
1020 mutex_exit(p->p_lock);
1021 }
1022 mutex_exit(proc_lock);
1023
1024 /*
1025 * Kick all CPUs to make them preempt any LWPs running in user mode.
1026 * They'll trap into the kernel and suspend themselves in userret().
1027 */
1028 for (CPU_INFO_FOREACH(cii, ci)) {
1029 spc_lock(ci);
1030 cpu_need_resched(ci, RESCHED_IMMED);
1031 spc_unlock(ci);
1032 }
1033 }
1034
1035 /*
1036 * sched_unsleep:
1037 *
  1038  *	This is called when the LWP has not been awoken normally but instead
1039 * interrupted: for example, if the sleep timed out. Because of this,
1040 * it's not a valid action for running or idle LWPs.
1041 */
1042 static void
1043 sched_unsleep(struct lwp *l, bool cleanup)
1044 {
1045
1046 lwp_unlock(l);
1047 panic("sched_unsleep");
1048 }
1049
1050 static void
1051 resched_cpu(struct lwp *l)
1052 {
1053 struct cpu_info *ci = l->l_cpu;
1054
1055 KASSERT(lwp_locked(l, NULL));
1056 if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority)
1057 cpu_need_resched(ci, 0);
1058 }
1059
1060 static void
1061 sched_changepri(struct lwp *l, pri_t pri)
1062 {
1063
1064 KASSERT(lwp_locked(l, NULL));
1065
1066 if (l->l_stat == LSRUN) {
1067 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
1068 sched_dequeue(l);
1069 l->l_priority = pri;
1070 sched_enqueue(l, false);
1071 } else {
1072 l->l_priority = pri;
1073 }
1074 resched_cpu(l);
1075 }
1076
1077 static void
1078 sched_lendpri(struct lwp *l, pri_t pri)
1079 {
1080
1081 KASSERT(lwp_locked(l, NULL));
1082
1083 if (l->l_stat == LSRUN) {
1084 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
1085 sched_dequeue(l);
1086 l->l_inheritedprio = pri;
1087 sched_enqueue(l, false);
1088 } else {
1089 l->l_inheritedprio = pri;
1090 }
1091 resched_cpu(l);
1092 }
1093
1094 struct lwp *
1095 syncobj_noowner(wchan_t wchan)
1096 {
1097
1098 return NULL;
1099 }
1100
1101 /* Decay 95% of proc::p_pctcpu in 60 seconds, ccpu = exp(-1/20) */
1102 const fixpt_t ccpu = 0.95122942450071400909 * FSCALE;
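/*
 * Worked out: the decay is applied on each sched_pstats() pass, about
 * once per second (the load average code below samples every fifth pass
 * as its "5 second intervals"), so after 60 passes the old value is
 * scaled by ccpu^60 = exp(-60/20) = exp(-3) ~= 0.05, i.e. roughly 95%
 * of it has decayed, as stated above.
 */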
1103
1104 /*
1105 * Constants for averages over 1, 5 and 15 minutes when sampling at
1106 * 5 second intervals.
1107 */
1108 static const fixpt_t cexp[ ] = {
1109 0.9200444146293232 * FSCALE, /* exp(-1/12) */
1110 0.9834714538216174 * FSCALE, /* exp(-1/60) */
1111 0.9944598480048967 * FSCALE, /* exp(-1/180) */
1112 };
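/*
 * Derivation: at one sample every 5 seconds, windows of 1, 5 and 15
 * minutes correspond to 12, 60 and 180 samples, hence per-sample decay
 * factors of exp(-1/12), exp(-1/60) and exp(-1/180).  Each sample then
 * updates the average (in fixed point, see sched_pstats() below) as
 *
 *	ldavg = cexp * ldavg + nrun * (1 - cexp)
 */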
1113
1114 /*
1115 * sched_pstats:
1116 *
1117 * => Update process statistics and check CPU resource allocation.
1118 * => Call scheduler-specific hook to eventually adjust LWP priorities.
  1119  *   => Compute the load averages over 1, 5 and 15 minute intervals.
1120 */
1121 void
1122 sched_pstats(void)
1123 {
1124 extern struct loadavg averunnable;
1125 struct loadavg *avg = &averunnable;
1126 const int clkhz = (stathz != 0 ? stathz : hz);
1127 static bool backwards = false;
1128 static u_int lavg_count = 0;
1129 struct proc *p;
1130 int nrun;
1131
1132 sched_pstats_ticks++;
1133 if (++lavg_count >= 5) {
1134 lavg_count = 0;
1135 nrun = 0;
1136 }
1137 mutex_enter(proc_lock);
1138 PROCLIST_FOREACH(p, &allproc) {
1139 struct lwp *l;
1140 struct rlimit *rlim;
1141 time_t runtm;
1142 int sig;
1143
1144 /* Increment sleep time (if sleeping), ignore overflow. */
1145 mutex_enter(p->p_lock);
1146 runtm = p->p_rtime.sec;
1147 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1148 fixpt_t lpctcpu;
1149 u_int lcpticks;
1150
1151 if (__predict_false((l->l_flag & LW_IDLE) != 0))
1152 continue;
1153 lwp_lock(l);
1154 runtm += l->l_rtime.sec;
1155 l->l_swtime++;
1156 sched_lwp_stats(l);
1157
1158 /* For load average calculation. */
1159 if (__predict_false(lavg_count == 0) &&
1160 (l->l_flag & (LW_SINTR | LW_SYSTEM)) == 0) {
1161 switch (l->l_stat) {
1162 case LSSLEEP:
1163 if (l->l_slptime > 1) {
1164 break;
  1165 					}
					/* FALLTHROUGH */
  1166 				case LSRUN:
1167 case LSONPROC:
1168 case LSIDL:
1169 nrun++;
1170 }
1171 }
1172 lwp_unlock(l);
1173
1174 l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
1175 if (l->l_slptime != 0)
1176 continue;
1177
1178 lpctcpu = l->l_pctcpu;
1179 lcpticks = atomic_swap_uint(&l->l_cpticks, 0);
1180 lpctcpu += ((FSCALE - ccpu) *
1181 (lcpticks * FSCALE / clkhz)) >> FSHIFT;
1182 l->l_pctcpu = lpctcpu;
1183 }
1184 /* Calculating p_pctcpu only for ps(1) */
1185 p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
1186
1187 /*
1188 * Check if the process exceeds its CPU resource allocation.
1189 * If over the hard limit, kill it with SIGKILL.
1190 * If over the soft limit, send SIGXCPU and raise
1191 * the soft limit a little.
1192 */
1193 rlim = &p->p_rlimit[RLIMIT_CPU];
1194 sig = 0;
1195 if (__predict_false(runtm >= rlim->rlim_cur)) {
1196 if (runtm >= rlim->rlim_max) {
1197 sig = SIGKILL;
1198 log(LOG_NOTICE, "pid %d is killed: %s\n",
1199 p->p_pid, "exceeded RLIMIT_CPU");
1200 uprintf("pid %d, command %s, is killed: %s\n",
1201 p->p_pid, p->p_comm,
1202 "exceeded RLIMIT_CPU");
1203 } else {
1204 sig = SIGXCPU;
1205 if (rlim->rlim_cur < rlim->rlim_max)
1206 rlim->rlim_cur += 5;
1207 }
1208 }
1209 mutex_exit(p->p_lock);
1210 if (__predict_false(runtm < 0)) {
1211 if (!backwards) {
1212 backwards = true;
1213 printf("WARNING: negative runtime; "
1214 "monotonic clock has gone backwards\n");
1215 }
1216 } else if (__predict_false(sig)) {
1217 KASSERT((p->p_flag & PK_SYSTEM) == 0);
1218 psignal(p, sig);
1219 }
1220 }
1221 mutex_exit(proc_lock);
1222
1223 /* Load average calculation. */
1224 if (__predict_false(lavg_count == 0)) {
1225 int i;
1226 CTASSERT(__arraycount(cexp) == __arraycount(avg->ldavg));
1227 for (i = 0; i < __arraycount(cexp); i++) {
1228 avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
1229 nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
1230 }
1231 }
1232
1233 /* Lightning bolt. */
1234 cv_broadcast(&lbolt);
1235 }
1236