/* $NetBSD: kern_synch.c,v 1.177.2.15 2007/03/12 05:58:38 rmind Exp $ */

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *     The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *     @(#)kern_synch.c 8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.177.2.15 2007/03/12 05:58:38 rmind Exp $");

#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define __MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/cpu.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall_stats.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

int lbolt;			/* once a second sleep address */

/*
 * The global scheduler state.
 */
kmutex_t sched_mutex;		/* global sched state mutex */

static void sched_unsleep(struct lwp *);
static void sched_changepri(struct lwp *, pri_t);
static void sched_lendpri(struct lwp *, pri_t);

syncobj_t sleep_syncobj = {
        SOBJ_SLEEPQ_SORTED,
        sleepq_unsleep,
        sleepq_changepri,
        sleepq_lendpri,
        syncobj_noowner,
};

syncobj_t sched_syncobj = {
        SOBJ_SLEEPQ_SORTED,
        sched_unsleep,
        sched_changepri,
        sched_lendpri,
        syncobj_noowner,
};

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return. The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers. This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int safepri;

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call. Suspends the current process until a wakeup is
 * performed on the specified identifier. The process will then be made
 * runnable with the specified priority. Sleeps at most timo/hz seconds (0
 * means no timeout). If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked. Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires. If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue. The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    volatile struct simplelock *interlock)
{
        struct lwp *l = curlwp;
        sleepq_t *sq;
        int error, catch;

        if (sleepq_dontsleep(l)) {
                (void)sleepq_abort(NULL, 0);
                if ((priority & PNORELOCK) != 0)
                        simple_unlock(interlock);
                return 0;
        }

        sq = sleeptab_lookup(&sleeptab, ident);
        sleepq_enter(sq, l);

        if (interlock != NULL) {
                LOCK_ASSERT(simple_lock_held(interlock));
                simple_unlock(interlock);
        }

        catch = priority & PCATCH;
        sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
            &sleep_syncobj);
        error = sleepq_unblock(timo, catch);

        if (interlock != NULL && (priority & PNORELOCK) == 0)
                simple_lock(interlock);

        return error;
}
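
/*
 * A minimal usage sketch for ltsleep(), assuming a hypothetical softc
 * `sc' with a simple_lock `sc_slock', a flag word `sc_flags' and a flag
 * `SC_READY' (none of these appear in this file).  The interlock is
 * dropped while asleep and, without PNORELOCK, re-taken before return:
 *
 *      simple_lock(&sc->sc_slock);
 *      while ((sc->sc_flags & SC_READY) == 0) {
 *              error = ltsleep(&sc->sc_flags, PRIBIO | PCATCH, "scready",
 *                  0, &sc->sc_slock);
 *              if (error != 0)
 *                      break;
 *      }
 *      simple_unlock(&sc->sc_slock);
 */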

int
mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
    kmutex_t *mtx)
{
        struct lwp *l = curlwp;
        sleepq_t *sq;
        int error, catch;

        if (sleepq_dontsleep(l)) {
                (void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
                return 0;
        }

        sq = sleeptab_lookup(&sleeptab, ident);
        sleepq_enter(sq, l);
        mutex_exit(mtx);

        catch = priority & PCATCH;
        sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
            &sleep_syncobj);
        error = sleepq_unblock(timo, catch);

        if ((priority & PNORELOCK) == 0)
                mutex_enter(mtx);

        return error;
}
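
/*
 * The kmutex_t variant follows the same pattern; a sketch using the same
 * hypothetical softc, this time with a kmutex_t `sc_lock':
 *
 *      mutex_enter(&sc->sc_lock);
 *      while ((sc->sc_flags & SC_READY) == 0) {
 *              error = mtsleep(&sc->sc_flags, PRIBIO | PCATCH, "scready",
 *                  0, &sc->sc_lock);
 *              if (error != 0)
 *                      break;
 *      }
 *      mutex_exit(&sc->sc_lock);
 */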

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
        struct lwp *l = curlwp;
        sleepq_t *sq;
        int error;

        if (sleepq_dontsleep(l))
                return sleepq_abort(NULL, 0);

        if (mtx != NULL)
                mutex_exit(mtx);
        sq = sleeptab_lookup(&sleeptab, l);
        sleepq_enter(sq, l);
        sleepq_block(sq, sched_kpri(l), l, wmesg, timo, intr, &sleep_syncobj);
        error = sleepq_unblock(timo, intr);
        if (mtx != NULL)
                mutex_enter(mtx);

        return error;
}
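
/*
 * An illustrative call, not taken from this file: to pause for roughly
 * one second, uninterruptible by signals and with no mutex held:
 *
 *      (void)kpause("pause", false, hz, NULL);
 */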

/*
 * OBSOLETE INTERFACE
 *
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
        sleepq_t *sq;

        if (cold)
                return;

        sq = sleeptab_lookup(&sleeptab, ident);
        sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
        sleepq_t *sq;

        if (cold)
                return;

        sq = sleeptab_lookup(&sleeptab, ident);
        sleepq_wake(sq, ident, 1);
}
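
/*
 * The identifier passed to wakeup()/wakeup_one() must be the same wchan_t
 * that the sleepers passed to ltsleep()/mtsleep().  Continuing the
 * hypothetical example above:
 *
 *      sc->sc_flags |= SC_READY;
 *      wakeup(&sc->sc_flags);
 */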


/*
 * General yield call. Puts the current process back on its run queue and
 * performs a voluntary context switch. Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat code).
 */
void
yield(void)
{
        struct lwp *l = curlwp;

        KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
        lwp_lock(l);
        if (l->l_stat == LSONPROC) {
                KASSERT(lwp_locked(l, &sched_mutex));
                l->l_priority = l->l_usrpri;
        }
        l->l_nvcsw++;
        mi_switch(l);
        KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call. Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
        struct lwp *l = curlwp;

        KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
        lwp_lock(l);
        if (l->l_stat == LSONPROC) {
                KASSERT(lwp_locked(l, &sched_mutex));
                l->l_priority = l->l_usrpri;
        }
        l->l_nivcsw++;
        (void)mi_switch(l);
        KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * sched_switch_unlock: update 'curlwp' and release old lwp.
 */

void
sched_switch_unlock(struct lwp *old, struct lwp *new)
{

        KASSERT(old == NULL || old == curlwp);
        KASSERT(new != NULL);

        if (old != NULL) {
                LOCKDEBUG_BARRIER(old->l_mutex, 1);
        } else {
                LOCKDEBUG_BARRIER(NULL, 1);
        }

        curlwp = new;
        if (old != NULL) {
                lwp_unlock(old);
        }
        spl0();
}

/*
 * Compute the amount of time during which the current lwp was running.
 *
 * - update l_rtime unless it's an idle lwp.
 * - update spc_runtime for the next lwp.
 */

static inline void
updatertime(struct lwp *l, struct schedstate_percpu *spc)
{
        struct timeval tv;
        long s, u;

        if ((l->l_flag & LW_IDLE) != 0) {
                microtime(&spc->spc_runtime);
                return;
        }

        microtime(&tv);
        u = l->l_rtime.tv_usec + (tv.tv_usec - spc->spc_runtime.tv_usec);
        s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
        if (u < 0) {
                u += 1000000;
                s--;
        } else if (u >= 1000000) {
                u -= 1000000;
                s++;
        }
        l->l_rtime.tv_usec = u;
        l->l_rtime.tv_sec = s;

        spc->spc_runtime = tv;
}
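
/*
 * A worked example of the carry logic above: if l_rtime is
 * { 2 s, 900000 us } and 300000 us have elapsed since spc_runtime was
 * last set, u becomes 1200000, which is normalized to
 * l_rtime = { 3 s, 200000 us }.
 */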

/*
 * The machine independent parts of context switch.
 *
 * Returns 1 if another process was actually run.
 */
int
mi_switch(struct lwp *l)
{
        struct schedstate_percpu *spc;
        struct lwp *newl;
        int retval, oldspl;

        LOCK_ASSERT(lwp_locked(l, NULL));

#ifdef LOCKDEBUG
        spinlock_switchcheck();
        simple_lock_switchcheck();
#endif
#ifdef KSTACK_CHECK_MAGIC
        kstack_check_magic(l);
#endif

        /*
         * It's safe to read the per CPU schedstate unlocked here, as all we
         * are after is the run time and that's guaranteed to have been last
         * updated by this CPU.
         */
        KDASSERT(l->l_cpu == curcpu());
        spc = &l->l_cpu->ci_schedstate;

        /* Count time spent in current system call */
        SYSCALL_TIME_SLEEP(l);

        /*
         * XXXSMP If we are using h/w performance counters, save context.
         */
#if PERFCTRS
        if (PMC_ENABLED(l->l_proc)) {
                pmc_save_context(l->l_proc);
        }
#endif

        /*
         * Process is about to yield the CPU; clear the appropriate
         * scheduling flags.
         */
        spc->spc_flags &= ~SPCF_SWITCHCLEAR;

        LOCKDEBUG_BARRIER(l->l_mutex, 1);

        /*
         * Switch to the new LWP if necessary.
         * When we run again, we'll return back here.
         */
        oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);

        /*
         * Acquire the sched_mutex if necessary.
         */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
        if (l->l_mutex != &sched_mutex) {
                mutex_enter(&sched_mutex);
        }
#endif
        /*
         * Let sched_switch() select the LWP to run on the CPU next.
         * If no LWP is runnable, switch to the idle LWP.
         * Please note that sched_switch() will enqueue the LWP.
         */
        newl = sched_switch(l);
        if (newl == NULL) {
                newl = l->l_cpu->ci_data.cpu_idlelwp;
                KASSERT(newl != NULL);
        } else {
                sched_dequeue(newl);
        }
        KASSERT(lwp_locked(newl, &sched_mutex));
        newl->l_stat = LSONPROC;
        newl->l_cpu = l->l_cpu;

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
        if (l->l_mutex != &sched_mutex) {
                mutex_exit(&sched_mutex);
        }
#endif

        updatertime(l, spc);
        if (l != newl) {
                struct lwp *prevlwp;

                uvmexp.swtch++;
                pmap_deactivate(l);
                prevlwp = cpu_switchto(l, newl);
                sched_switch_unlock(prevlwp, l);
                pmap_activate(l);
                retval = 1;
        } else {
                sched_switch_unlock(l, l);
                retval = 0;
        }

        KASSERT(l == curlwp);
        KASSERT(l->l_stat == LSONPROC);

        /*
         * XXXSMP If we are using h/w performance counters, restore context.
         */
#if PERFCTRS
        if (PMC_ENABLED(l->l_proc)) {
                pmc_restore_context(l->l_proc);
        }
#endif

        /*
         * We're running again; record our new start time. We might
         * be running on a new CPU now, so don't use the cached
         * schedstate_percpu pointer.
         */
        SYSCALL_TIME_WAKEUP(l);
        KDASSERT(l->l_cpu == curcpu());

        (void)splsched();
        splx(oldspl);
        return retval;
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked. Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
        struct proc *p = l->l_proc;
        sigset_t *ss;

        KASSERT((l->l_flag & LW_IDLE) == 0);
        KASSERT(mutex_owned(&p->p_smutex));
        KASSERT(lwp_locked(l, NULL));

        switch (l->l_stat) {
        case LSSTOP:
                /*
                 * If we're being traced (possibly because someone attached us
                 * while we were stopped), check for a signal from the debugger.
                 */
                if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
                        if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
                                ss = &l->l_sigpend.sp_set;
                        else
                                ss = &p->p_sigpend.sp_set;
                        sigaddset(ss, p->p_xstat);
                        signotify(l);
                }
                p->p_nrlwps++;
                break;
        case LSSUSPENDED:
                l->l_flag &= ~LW_WSUSPEND;
                p->p_nrlwps++;
                break;
        case LSSLEEP:
                KASSERT(l->l_wchan != NULL);
                break;
        default:
                panic("setrunnable: lwp %p state was %d", l, l->l_stat);
        }

        /*
         * If the LWP was sleeping interruptibly, then it's OK to start it
         * again. If not, mark it as still sleeping.
         */
        if (l->l_wchan != NULL) {
                l->l_stat = LSSLEEP;
                /* lwp_unsleep() will release the lock. */
                lwp_unsleep(l);
                return;
        }

        LOCK_ASSERT(lwp_locked(l, &sched_mutex));

        /*
         * If the LWP is still on the CPU, mark it as LSONPROC. It may be
         * about to call mi_switch(), in which case it will yield.
         *
         * XXXSMP Will need to change for preemption.
         */
#ifdef MULTIPROCESSOR
        if (l->l_cpu->ci_curlwp == l) {
#else
        if (l == curlwp) {
#endif
                l->l_stat = LSONPROC;
                l->l_slptime = 0;
                lwp_unlock(l);
                return;
        }

        /*
         * Set the LWP runnable. If it's swapped out, we need to wake the swapper
         * to bring it back in. Otherwise, enter it into a run queue.
         */
        sched_setrunnable(l);
        l->l_stat = LSRUN;
        l->l_slptime = 0;

        if (l->l_flag & LW_INMEM) {
                sched_enqueue(l);
                resched_cpu(l);
                lwp_unlock(l);
        } else {
                lwp_unlock(l);
                uvm_kick_scheduler();
        }
}

/*
 * suspendsched:
 *
 *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
#ifdef MULTIPROCESSOR
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;
#endif
        struct lwp *l;
        struct proc *p;

        /*
         * We do this by process in order not to violate the locking rules.
         */
        mutex_enter(&proclist_mutex);
        PROCLIST_FOREACH(p, &allproc) {
                mutex_enter(&p->p_smutex);

                if ((p->p_flag & PK_SYSTEM) != 0) {
                        mutex_exit(&p->p_smutex);
                        continue;
                }

                p->p_stat = SSTOP;

                LIST_FOREACH(l, &p->p_lwps, l_sibling) {
                        if (l == curlwp)
                                continue;

                        lwp_lock(l);

                        /*
                         * Set LW_WREBOOT so that the LWP will suspend itself
                         * when it tries to return to user mode. We want to
                         * get as many LWPs as possible to the user / kernel
                         * boundary, so that they will release any locks
                         * that they hold.
                         */
                        l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

                        if (l->l_stat == LSSLEEP &&
                            (l->l_flag & LW_SINTR) != 0) {
                                /* setrunnable() will release the lock. */
                                setrunnable(l);
                                continue;
                        }

                        lwp_unlock(l);
                }

                mutex_exit(&p->p_smutex);
        }
        mutex_exit(&proclist_mutex);

        /*
         * Kick all CPUs to make them preempt any LWPs running in user mode.
         * They'll trap into the kernel and suspend themselves in userret().
         */
        sched_lock(0);
#ifdef MULTIPROCESSOR
        for (CPU_INFO_FOREACH(cii, ci))
                cpu_need_resched(ci, 0);
#else
        cpu_need_resched(curcpu(), 0);
#endif
        sched_unlock(0);
}

/*
 * sched_kpri:
 *
 *	Scale a priority level to a kernel priority level, usually
 *	for an LWP that is about to sleep.
 */
pri_t
sched_kpri(struct lwp *l)
{
        /*
         * Scale user priorities (127 -> 50) up to kernel priorities
         * in the range (49 -> 8). Reserve the top 8 kernel priorities
         * for high priority kthreads. Kernel priorities passed in
         * are left "as is". XXX This is somewhat arbitrary.
         */
        static const uint8_t kpri_tab[] = {
                 0,  1,  2,  3,  4,  5,  6,  7,
                 8,  9, 10, 11, 12, 13, 14, 15,
                16, 17, 18, 19, 20, 21, 22, 23,
                24, 25, 26, 27, 28, 29, 30, 31,
                32, 33, 34, 35, 36, 37, 38, 39,
                40, 41, 42, 43, 44, 45, 46, 47,
                48, 49,  8,  8,  9,  9, 10, 10,
                11, 11, 12, 12, 13, 14, 14, 15,
                15, 16, 16, 17, 17, 18, 18, 19,
                20, 20, 21, 21, 22, 22, 23, 23,
                24, 24, 25, 26, 26, 27, 27, 28,
                28, 29, 29, 30, 30, 31, 32, 32,
                33, 33, 34, 34, 35, 35, 36, 36,
                37, 38, 38, 39, 39, 40, 40, 41,
                41, 42, 42, 43, 44, 44, 45, 45,
                46, 46, 47, 47, 48, 48, 49, 49,
        };

        return (pri_t)kpri_tab[l->l_usrpri];
}
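
/*
 * Reading the table above: a user priority of 50 maps to kernel
 * priority 8, a user priority of 127 maps to 49, and values below 50
 * (already kernel priorities) pass through unchanged, e.g.:
 *
 *      l->l_usrpri = 127;
 *      KASSERT(sched_kpri(l) == 49);
 */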

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out. Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
static void
sched_unsleep(struct lwp *l)
{

        lwp_unlock(l);
        panic("sched_unsleep");
}

inline void
resched_cpu(struct lwp *l)
{
        struct cpu_info *ci;
        const pri_t pri = lwp_eprio(l);

        /*
         * XXXSMP
         * Since l->l_cpu persists across a context switch,
         * this gives us *very weak* processor affinity, in
         * that we notify the CPU on which the process last
         * ran that it should try to switch.
         *
         * This does not guarantee that the process will run on
         * that processor next, because another processor might
         * grab it the next time it performs a context switch.
         *
         * This also does not handle the case where its last
         * CPU is running a higher-priority process, but every
         * other CPU is running a lower-priority process. There
         * are ways to handle this situation, but they're not
         * currently very pretty, and we also need to weigh the
         * cost of moving a process from one CPU to another.
         *
         * XXXSMP
         * There is also the issue of locking the other CPU's
         * sched state, which we currently do not do.
         */
        ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
        if (pri < ci->ci_schedstate.spc_curpriority)
                cpu_need_resched(ci, 0);
}

static void
sched_changepri(struct lwp *l, pri_t pri)
{

        LOCK_ASSERT(lwp_locked(l, &sched_mutex));

        l->l_usrpri = pri;
        if (l->l_priority < PUSER)
                return;

        if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
                l->l_priority = pri;
                return;
        }

        sched_dequeue(l);
        l->l_priority = pri;
        sched_enqueue(l);
        resched_cpu(l);
}

static void
sched_lendpri(struct lwp *l, pri_t pri)
{

        LOCK_ASSERT(lwp_locked(l, &sched_mutex));

        if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
                l->l_inheritedprio = pri;
                return;
        }

        sched_dequeue(l);
        l->l_inheritedprio = pri;
        sched_enqueue(l);
        resched_cpu(l);
}

struct lwp *
syncobj_noowner(wchan_t wchan)
{

        return NULL;
}