/*	$NetBSD: kern_synch.c,v 1.177.2.16 2007/03/17 16:54:37 rmind Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.177.2.16 2007/03/17 16:54:37 rmind Exp $");

#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/cpu.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall_stats.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

int	lbolt;			/* once a second sleep address */

/*
 * The global scheduler state.
 */
kmutex_t	sched_mutex;	/* global sched state mutex */

static void	sched_unsleep(struct lwp *);
static void	sched_changepri(struct lwp *, pri_t);
static void	sched_lendpri(struct lwp *, pri_t);

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri,
	sched_lendpri,
	syncobj_noowner,
};

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;
/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, otherwise signals are not checked.  Returns 0
 * if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);

	if (interlock != NULL) {
		LOCK_ASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}

int
mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);
	mutex_exit(mtx);

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if ((priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}
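
/*
 * Illustrative sketch, not part of the original source: the usual
 * pattern around mtsleep()/wakeup() is to re-check the awaited
 * condition in a loop while holding the kmutex passed as the
 * interlock.  The names sc, sc_lock and sc_ready below are
 * hypothetical.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_ready) {
 *		error = mtsleep(&sc->sc_ready, PRIBIO, "scwait", 0,
 *		    &sc->sc_lock);
 *		if (error)
 *			break;
 *	}
 *	mutex_exit(&sc->sc_lock);
 *
 * The waking side sets the condition and calls wakeup() (or
 * wakeup_one()) on the same address while holding sc_lock:
 *
 *	sc->sc_ready = true;
 *	wakeup(&sc->sc_ready);
 */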

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_block(sq, sched_kpri(l), l, wmesg, timo, intr, &sleep_syncobj);
	error = sleepq_unblock(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
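
/*
 * Illustrative sketch, not part of the original source: kpause() suits
 * simple back-off or polling loops where no wakeup() is expected.  The
 * device_ready() test and the "dvpoll" wmesg below are hypothetical;
 * mstohz() converts milliseconds to clock ticks.
 *
 *	while (!device_ready(sc))
 *		(void)kpause("dvpoll", false, mstohz(10), NULL);
 *
 * Passing a non-NULL kmutex_t pointer makes kpause() drop that lock
 * around the sleep and re-acquire it before returning.
 */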

/*
 * OBSOLETE INTERFACE
 *
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat
 * code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nvcsw++;
	mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nivcsw++;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * sched_switch_unlock: update 'curlwp' and release old lwp.
 */

void
sched_switch_unlock(struct lwp *old, struct lwp *new)
{

	KASSERT(old == NULL || old == curlwp);
	KASSERT(new != NULL);

	if (old != NULL) {
		LOCKDEBUG_BARRIER(old->l_mutex, 1);
	} else {
		LOCKDEBUG_BARRIER(NULL, 1);
	}

	curlwp = new;
	if (old != NULL) {
		lwp_unlock(old);
	}
	spl0();
}

/*
 * Compute the amount of time during which the current lwp was running.
 *
 * - update l_rtime unless it's an idle lwp.
 * - update spc_runtime for the next lwp.
 */

static inline void
updatertime(struct lwp *l, struct schedstate_percpu *spc)
{
	struct timeval tv;
	long s, u;

	if ((l->l_flag & LW_IDLE) != 0) {
		microtime(&spc->spc_runtime);
		return;
	}

	microtime(&tv);
	u = l->l_rtime.tv_usec + (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;

	spc->spc_runtime = tv;
}
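
/*
 * Worked example of the normalization above (not part of the original
 * source): with a hypothetical accumulated l_rtime of { 1, 950000 } and
 * 100000 microseconds elapsed since spc_runtime was last set, u starts
 * out as 1050000; the carry branch subtracts 1000000 and increments s,
 * leaving l_rtime at { 2, 50000 }.
 */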

/*
 * The machine independent parts of context switch.
 *
 * Returns 1 if another process was actually run.
 */
int
mi_switch(struct lwp *l)
{
	struct schedstate_percpu *spc;
	struct lwp *newl;
	int retval, oldspl;

	LOCK_ASSERT(lwp_locked(l, NULL));

#ifdef LOCKDEBUG
	spinlock_switchcheck();
	simple_lock_switchcheck();
#endif
#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * It's safe to read the per-CPU schedstate unlocked here, as all we
	 * are after is the run time, and that is guaranteed to have been
	 * last updated by this CPU.
	 */
	KDASSERT(l->l_cpu == curcpu());
	spc = &l->l_cpu->ci_schedstate;

	/* Count time spent in current system call */
	SYSCALL_TIME_SLEEP(l);

	/*
	 * XXXSMP If we are using h/w performance counters, save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_save_context(l->l_proc);
	}
#endif

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_stat = LSRUN;
		if ((l->l_flag & LW_IDLE) == 0) {
			sched_enqueue(l, true);
		}
	}

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

	LOCKDEBUG_BARRIER(l->l_mutex, 1);

	/*
	 * Switch to the new LWP if necessary.
	 * When we run again, we'll return back here.
	 */
	oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);

	/*
	 * Acquire the sched_mutex if necessary.
	 */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_enter(&sched_mutex);
	}
#endif
	/*
	 * Let sched_nextlwp() select the LWP to run on the CPU next.
	 * If no LWP is runnable, switch to the idle LWP.
	 */
	newl = sched_nextlwp(l);
	if (newl) {
		sched_dequeue(newl);
	} else {
		newl = l->l_cpu->ci_data.cpu_idlelwp;
		KASSERT(newl != NULL);
	}
	KASSERT(lwp_locked(newl, &sched_mutex));
	newl->l_stat = LSONPROC;
	newl->l_cpu = l->l_cpu;

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_exit(&sched_mutex);
	}
#endif

	updatertime(l, spc);
	if (l != newl) {
		struct lwp *prevlwp;

		uvmexp.swtch++;
		pmap_deactivate(l);
		prevlwp = cpu_switchto(l, newl);
		sched_switch_unlock(prevlwp, l);
		pmap_activate(l);
		retval = 1;
	} else {
		sched_switch_unlock(l, l);
		retval = 0;
	}

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC);

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_restore_context(l->l_proc);
	}
#endif

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	SYSCALL_TIME_WAKEUP(l);
	KDASSERT(l->l_cpu == curcpu());

	(void)splsched();
	splx(oldspl);
	return retval;
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	sigset_t *ss;

	KASSERT((l->l_flag & LW_IDLE) == 0);
	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT(lwp_locked(l, NULL));

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the
		 * debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
				ss = &l->l_sigpend.sp_set;
			else
				ss = &p->p_sigpend.sp_set;
			sigaddset(ss, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~LW_WSUSPEND;
		p->p_nrlwps++;
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l);
		return;
	}

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 *
	 * XXXSMP Will need to change for preemption.
	 */
#ifdef MULTIPROCESSOR
	if (l->l_cpu->ci_curlwp == l) {
#else
	if (l == curlwp) {
#endif
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Set the LWP runnable.  If it's swapped out, we need to wake the
	 * swapper to bring it back in.  Otherwise, enter it into a run queue.
	 */
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	if (l->l_flag & LW_INMEM) {
		sched_enqueue(l, false);
		resched_cpu(l);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		uvm_kick_scheduler();
	}
}

/*
 * suspendsched:
 *
 *	Convert all LSSLEEP or LSRUN LWPs in non-PK_SYSTEM processes to
 *	LSSUSPENDED.
 */
void
suspendsched(void)
{
#ifdef MULTIPROCESSOR
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
#endif
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & PK_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set LW_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user / kernel
			 * boundary, so that they will release any locks
			 * that they hold.
			 */
			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & LW_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_mutex);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	sched_lock(0);
#ifdef MULTIPROCESSOR
	for (CPU_INFO_FOREACH(cii, ci))
		cpu_need_resched(ci, 0);
#else
	cpu_need_resched(curcpu(), 0);
#endif
	sched_unlock(0);
}

/*
 * sched_kpri:
 *
 *	Scale a priority level to a kernel priority level, usually
 *	for an LWP that is about to sleep.
 */
pri_t
sched_kpri(struct lwp *l)
{
	/*
	 * Scale user priorities (127 -> 50) up to kernel priorities
	 * in the range (49 -> 8).  Reserve the top 8 kernel priorities
	 * for high priority kthreads.  Kernel priorities passed in
	 * are left "as is".  XXX This is somewhat arbitrary.
	 */
	static const uint8_t kpri_tab[] = {
		 0,  1,  2,  3,  4,  5,  6,  7,
		 8,  9, 10, 11, 12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31,
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49,  8,  8,  9,  9, 10, 10,
		11, 11, 12, 12, 13, 14, 14, 15,
		15, 16, 16, 17, 17, 18, 18, 19,
		20, 20, 21, 21, 22, 22, 23, 23,
		24, 24, 25, 26, 26, 27, 27, 28,
		28, 29, 29, 30, 30, 31, 32, 32,
		33, 33, 34, 34, 35, 35, 36, 36,
		37, 38, 38, 39, 39, 40, 40, 41,
		41, 42, 42, 43, 44, 44, 45, 45,
		46, 46, 47, 47, 48, 48, 49, 49,
	};

	return (pri_t)kpri_tab[l->l_usrpri];
}
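
/*
 * For example, reading kpri_tab above: a kernel priority such as 30 is
 * passed through unchanged, while the user priorities 50 and 127 scale
 * to the kernel priorities 8 and 49 respectively.
 */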

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
static void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

inline void
resched_cpu(struct lwp *l)
{
	struct cpu_info *ci;
	const pri_t pri = lwp_eprio(l);

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci, 0);
}

static void
sched_changepri(struct lwp *l, pri_t pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	l->l_usrpri = pri;
	if (l->l_priority < PUSER)
		return;

	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
		l->l_priority = pri;
		return;
	}

	sched_dequeue(l);
	l->l_priority = pri;
	sched_enqueue(l, false);
	resched_cpu(l);
}

static void
sched_lendpri(struct lwp *l, pri_t pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
		l->l_inheritedprio = pri;
		return;
	}

	sched_dequeue(l);
	l->l_inheritedprio = pri;
	sched_enqueue(l, false);
	resched_cpu(l);
}

struct lwp *
syncobj_noowner(wchan_t wchan)
{

	return NULL;
}