/*	$NetBSD: kern_synch.c,v 1.177.2.17 2007/03/21 22:04:18 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.177.2.17 2007/03/21 22:04:18 ad Exp $");

#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/cpu.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall_stats.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

int	lbolt;			/* once a second sleep address */

/*
 * The global scheduler state.
 */
kmutex_t	sched_mutex;	/* global sched state mutex */

static void	sched_unsleep(struct lwp *);
static void	sched_changepri(struct lwp *, pri_t);
static void	sched_lendpri(struct lwp *, pri_t);

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri,
	sched_lendpri,
	syncobj_noowner,
};

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, otherwise signals are not checked.  Returns 0
 * if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);

	if (interlock != NULL) {
		LOCK_ASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}
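
/*
 * Example usage (a sketch only; sc_slock and sc_busy are hypothetical
 * driver softc fields, not part of this file).  The interlock is dropped
 * once the LWP is on the sleep queue and, because PNORELOCK is not given,
 * it is reacquired before ltsleep() returns:
 *
 *	simple_lock(&sc->sc_slock);
 *	while (sc->sc_busy)
 *		(void)ltsleep(&sc->sc_busy, PWAIT, "scbusy", 0,
 *		    &sc->sc_slock);
 *	sc->sc_busy = 1;
 *	simple_unlock(&sc->sc_slock);
 */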

int
mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);
	mutex_exit(mtx);

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if ((priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}
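
/*
 * Example usage (a sketch only; sc_lock and sc_count are hypothetical
 * fields).  This is the kmutex_t variant: the mutex is released while the
 * LWP sleeps and, since PNORELOCK is not passed, reacquired before
 * mtsleep() returns.  PCATCH allows the wait to be interrupted by a signal:
 *
 *	int error = 0;
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (sc->sc_count == 0) {
 *		error = mtsleep(&sc->sc_count, PWAIT | PCATCH, "scwait",
 *		    0, &sc->sc_lock);
 *		if (error != 0)
 *			break;
 *	}
 *	if (error == 0)
 *		sc->sc_count--;
 *	mutex_exit(&sc->sc_lock);
 */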

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_block(sq, sched_kpri(l), l, wmesg, timo, intr, &sleep_syncobj);
	error = sleepq_unblock(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
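
/*
 * Example usage (sketch): pause for roughly one second without expecting a
 * wakeup, not interruptible by signals and with no mutex to drop:
 *
 *	(void)kpause("zzz", false, hz, NULL);
 */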

/*
 * OBSOLETE INTERFACE
 *
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}
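
/*
 * Example usage (sketch; pairs with the hypothetical mtsleep() consumer
 * shown above).  The producer updates the shared state under the mutex and
 * then wakes any LWPs sleeping on the same identifier:
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_count++;
 *	wakeup(&sc->sc_count);
 *	mutex_exit(&sc->sc_lock);
 */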


/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat
 * code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nvcsw++;
	mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nivcsw++;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * sched_switch_unlock: update 'curlwp' and release old lwp.
 */

void
sched_switch_unlock(struct lwp *old, struct lwp *new)
{

	KASSERT(old == NULL || old == curlwp);
	KASSERT(new != NULL);

	if (old != NULL) {
		LOCKDEBUG_BARRIER(old->l_mutex, 1);
		lwp_unlock(old);
	} else {
		LOCKDEBUG_BARRIER(NULL, 1);
	}
	curlwp = new;
	spl0();
}

/*
 * Compute the amount of time during which the current lwp was running.
 *
 * - update l_rtime unless it's an idle lwp.
 * - update spc_runtime for the next lwp.
 */

static inline void
updatertime(struct lwp *l, struct schedstate_percpu *spc)
{
	struct timeval tv;
	long s, u;

	if ((l->l_flag & LW_IDLE) != 0) {
		microtime(&spc->spc_runtime);
		return;
	}

	microtime(&tv);
	u = l->l_rtime.tv_usec + (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;

	spc->spc_runtime = tv;
}

/*
 * The machine independent parts of context switch.
 *
 * Returns 1 if another process was actually run.
 */
int
mi_switch(struct lwp *l)
{
	struct schedstate_percpu *spc;
	struct lwp *newl;
	int retval, oldspl;

	LOCK_ASSERT(lwp_locked(l, NULL));

#ifdef LOCKDEBUG
	spinlock_switchcheck();
	simple_lock_switchcheck();
#endif
#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * It's safe to read the per-CPU schedstate unlocked here, as all we
	 * are after is the run time and that's guaranteed to have been last
	 * updated by this CPU.
	 */
	KDASSERT(l->l_cpu == curcpu());
	spc = &l->l_cpu->ci_schedstate;

	/* Count time spent in current system call */
	SYSCALL_TIME_SLEEP(l);

	/*
	 * XXXSMP If we are using h/w performance counters, save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_save_context(l->l_proc);
	}
#endif

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_stat = LSRUN;
		if ((l->l_flag & LW_IDLE) == 0) {
			sched_enqueue(l, true);
		}
	}

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

	LOCKDEBUG_BARRIER(l->l_mutex, 1);

	/*
	 * Switch to the new LWP if necessary.
	 * When we run again, we'll return back here.
	 */
	oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);

	/*
	 * Acquire the sched_mutex if necessary.
	 */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_enter(&sched_mutex);
	}
#endif
	/*
	 * Let sched_nextlwp() select the LWP to run on the CPU next.
	 * If no LWP is runnable, switch to the idle LWP.
	 */
	newl = sched_nextlwp(l);
	if (newl) {
		sched_dequeue(newl);
	} else {
		newl = l->l_cpu->ci_data.cpu_idlelwp;
		KASSERT(newl != NULL);
	}
	KASSERT(lwp_locked(newl, &sched_mutex));
	newl->l_stat = LSONPROC;
	newl->l_cpu = l->l_cpu;
	newl->l_flag |= LW_RUNNING;

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_exit(&sched_mutex);
	}
#endif

	updatertime(l, spc);
	if (l != newl) {
		struct lwp *prevlwp;

		uvmexp.swtch++;
		pmap_deactivate(l);
		l->l_flag &= ~LW_RUNNING;
		prevlwp = cpu_switchto(l, newl);
		sched_switch_unlock(prevlwp, l);
		pmap_activate(l);
		retval = 1;
	} else {
		sched_switch_unlock(l, l);
		retval = 0;
	}

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC);

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_restore_context(l->l_proc);
	}
#endif

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	SYSCALL_TIME_WAKEUP(l);
	KDASSERT(l->l_cpu == curcpu());

	(void)splsched();
	splx(oldspl);
	return retval;
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	sigset_t *ss;

	KASSERT((l->l_flag & LW_IDLE) == 0);
	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT(lwp_locked(l, NULL));

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the
		 * debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
				ss = &l->l_sigpend.sp_set;
			else
				ss = &p->p_sigpend.sp_set;
			sigaddset(ss, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~LW_WSUSPEND;
		p->p_nrlwps++;
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l);
		return;
	}

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_flag & LW_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Set the LWP runnable.  If it's swapped out, we need to wake the
	 * swapper to bring it back in.  Otherwise, enter it into a run queue.
	 */
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	if (l->l_flag & LW_INMEM) {
		sched_enqueue(l, false);
		resched_cpu(l);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		uvm_kick_scheduler();
	}
}

/*
 * suspendsched:
 *
 *	Convert all non-PK_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
#ifdef MULTIPROCESSOR
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
#endif
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & PK_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set LW_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user/kernel
			 * boundary, so that they will release any locks
			 * that they hold.
			 */
			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & LW_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_mutex);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	sched_lock(0);
#ifdef MULTIPROCESSOR
	for (CPU_INFO_FOREACH(cii, ci))
		cpu_need_resched(ci, 0);
#else
	cpu_need_resched(curcpu(), 0);
#endif
	sched_unlock(0);
}

/*
 * sched_kpri:
 *
 *	Scale a priority level to a kernel priority level, usually
 *	for an LWP that is about to sleep.
 */
pri_t
sched_kpri(struct lwp *l)
{
	/*
	 * Scale user priorities (127 -> 50) up to kernel priorities
	 * in the range (49 -> 8).  Reserve the top 8 kernel priorities
	 * for high priority kthreads.  Kernel priorities passed in
	 * are left "as is".  XXX This is somewhat arbitrary.
	 */
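	/*
	 * For example, reading the table below: kpri_tab[8] == 8 and
	 * kpri_tab[49] == 49 (priorities already in the kernel range are
	 * unchanged), while kpri_tab[50] == 8, kpri_tab[90] == 29 and
	 * kpri_tab[127] == 49.
	 */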
	static const uint8_t kpri_tab[] = {
		 0,   1,   2,   3,   4,   5,   6,   7,
		 8,   9,  10,  11,  12,  13,  14,  15,
		16,  17,  18,  19,  20,  21,  22,  23,
		24,  25,  26,  27,  28,  29,  30,  31,
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,   8,   8,   9,   9,  10,  10,
		11,  11,  12,  12,  13,  14,  14,  15,
		15,  16,  16,  17,  17,  18,  18,  19,
		20,  20,  21,  21,  22,  22,  23,  23,
		24,  24,  25,  26,  26,  27,  27,  28,
		28,  29,  29,  30,  30,  31,  32,  32,
		33,  33,  34,  34,  35,  35,  36,  36,
		37,  38,  38,  39,  39,  40,  40,  41,
		41,  42,  42,  43,  44,  44,  45,  45,
		46,  46,  47,  47,  48,  48,  49,  49,
	};

	return (pri_t)kpri_tab[l->l_usrpri];
}

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
static void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

inline void
resched_cpu(struct lwp *l)
{
	struct cpu_info *ci;
	const pri_t pri = lwp_eprio(l);

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci, 0);
}

static void
sched_changepri(struct lwp *l, pri_t pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	l->l_usrpri = pri;
	if (l->l_priority < PUSER)
		return;

	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
		l->l_priority = pri;
		return;
	}

	sched_dequeue(l);
	l->l_priority = pri;
	sched_enqueue(l, false);
	resched_cpu(l);
}

static void
sched_lendpri(struct lwp *l, pri_t pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
		l->l_inheritedprio = pri;
		return;
	}

	sched_dequeue(l);
	l->l_inheritedprio = pri;
	sched_enqueue(l, false);
	resched_cpu(l);
}

struct lwp *
syncobj_noowner(wchan_t wchan)
{

	return NULL;
}