/*	$NetBSD: kern_synch.c,v 1.177.2.11 2007/02/27 17:23:24 yamt Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.177.2.11 2007/02/27 17:23:24 yamt Exp $");

#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/cpu.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall_stats.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

int	lbolt;			/* once a second sleep address */

/*
 * The global scheduler state.
 */
kmutex_t	sched_mutex;	/* global sched state mutex */

static void	sched_unsleep(struct lwp *);
static void	sched_changepri(struct lwp *, pri_t);
static void	sched_lendpri(struct lwp *, pri_t);

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri,
	sched_lendpri,
	syncobj_noowner,
};

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, otherwise signals are not checked.  Returns 0
 * if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning to the caller unless the PNORELOCK flag is
 * specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);

	if (interlock != NULL) {
		LOCK_ASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}
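
/*
 * Illustrative usage sketch (not part of this file): a driver thread
 * waiting for a condition with ltsleep(), using a simplelock as the
 * interlock.  The softc pointer "sc" and its sc_slock/sc_ready members
 * are hypothetical.  Because PNORELOCK is not passed, the interlock is
 * held again by the time ltsleep() returns.
 *
 *	simple_lock(&sc->sc_slock);
 *	while (!sc->sc_ready) {
 *		error = ltsleep(&sc->sc_ready, PRIBIO | PCATCH, "scwait",
 *		    0, &sc->sc_slock);
 *		if (error != 0)
 *			break;
 *	}
 *	simple_unlock(&sc->sc_slock);
 */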

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_block(sq, sched_kpri(l), l, wmesg, timo, intr, &sleep_syncobj);
	error = sleepq_unblock(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
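
/*
 * Illustrative usage sketch (not part of this file): polling a device
 * roughly every hz/10 ticks, dropping a held kmutex across each sleep.
 * The "sc", sc_lock and scxx_ready() names are hypothetical.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!scxx_ready(sc)) {
 *		error = kpause("scpoll", true, hz / 10, &sc->sc_lock);
 *		if (error != 0 && error != EWOULDBLOCK)
 *			break;
 *	}
 *	mutex_exit(&sc->sc_lock);
 */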

/*
 * OBSOLETE INTERFACE
 *
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}
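
/*
 * Illustrative pairing sketch (not part of this file): the producer
 * side of the ltsleep() example above sets the condition under the
 * interlock and then wakes every LWP sleeping on &sc->sc_ready.
 *
 *	simple_lock(&sc->sc_slock);
 *	sc->sc_ready = 1;
 *	simple_unlock(&sc->sc_slock);
 *	wakeup(&sc->sc_ready);
 */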

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat
 * code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nvcsw++;
	mi_switch(l, NULL);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nivcsw++;
	(void)mi_switch(l, NULL);
	KERNEL_LOCK(l->l_biglocks, l);
}
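
/*
 * Illustrative usage sketch (not part of this file): a long-running
 * kernel loop giving up the CPU periodically so other LWPs can run.
 * The loop body and the npages/process_page() names are hypothetical.
 *
 *	for (i = 0; i < npages; i++) {
 *		process_page(i);
 *		if ((i & 1023) == 1023)
 *			preempt();
 *	}
 */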

/*
 * sched_switch_unlock: update 'curlwp' and release old lwp.
 */

void
sched_switch_unlock(struct lwp *old, struct lwp *new)
{

	KASSERT(old == NULL || old == curlwp);

	if (old != NULL) {
		LOCKDEBUG_BARRIER(old->l_mutex, 1);
	} else {
		LOCKDEBUG_BARRIER(NULL, 1);
	}

	curlwp = new;
	if (old != NULL) {
		lwp_unlock(old);
	}
	spl0();
}

/*
 * Compute the amount of time during which the current lwp was running.
 *
 * - update l_rtime unless it's an idle lwp.
 * - update spc_runtime for the next lwp.
 */

static inline void
updatertime(struct lwp *l, struct schedstate_percpu *spc)
{
	struct timeval tv;
	long s, u;

	if ((l->l_flag & LW_IDLE) != 0) {
		microtime(&spc->spc_runtime);
		return;
	}

	microtime(&tv);
	u = l->l_rtime.tv_usec + (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;

	spc->spc_runtime = tv;
}
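
/*
 * Worked example of the normalization above (illustrative): with an
 * accumulated l_rtime of 2.900000s and 0.300000s elapsed since
 * spc_runtime was last set, the intermediate sum can be 2s + 1200000us,
 * which the carry step turns into the expected 3.200000s.
 */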

/*
 * The machine independent parts of context switch.  Switch to "newl"
 * if non-NULL, otherwise let the scheduler choose the next lwp.
 *
 * Returns 1 if another LWP was actually run.
 */
int
mi_switch(struct lwp *l, struct lwp *newl)
{
	struct schedstate_percpu *spc;
	int retval, oldspl;

	LOCK_ASSERT(lwp_locked(l, NULL));

#ifdef LOCKDEBUG
	spinlock_switchcheck();
	simple_lock_switchcheck();
#endif
#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * It's safe to read the per CPU schedstate unlocked here, as all we
	 * are after is the run time and that's guaranteed to have been last
	 * updated by this CPU.
	 */
	KDASSERT(l->l_cpu == curcpu());
	spc = &l->l_cpu->ci_schedstate;

	/* Count time spent in current system call */
	SYSCALL_TIME_SLEEP(l);

	/*
	 * XXXSMP If we are using h/w performance counters, save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_save_context(l->l_proc);
	}
#endif

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_stat = LSRUN;
		if ((l->l_flag & LW_IDLE) == 0) {
			sched_enqueue(l);
		}
	}
	uvmexp.swtch++;

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

	LOCKDEBUG_BARRIER(l->l_mutex, 1);

	/*
	 * Switch to the new LWP if necessary.  When we run again,
	 * we'll return here.
	 */
	oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);

	/*
	 * Acquire the sched_mutex if necessary.
	 */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_enter(&sched_mutex);
	}
#endif

	if (newl == NULL) {
		newl = sched_nextlwp();
	}
	if (newl != NULL) {
		KASSERT(lwp_locked(newl, &sched_mutex));
		sched_dequeue(newl);
	} else {
		newl = l->l_cpu->ci_data.cpu_idlelwp;
		KASSERT(newl != NULL);
	}
	KASSERT(lwp_locked(newl, &sched_mutex));
	newl->l_stat = LSONPROC;
	newl->l_cpu = l->l_cpu;

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_exit(&sched_mutex);
	}
#endif

	updatertime(l, spc);
	if (l != newl) {
		struct lwp *prevlwp;

		uvmexp.swtch++;
		pmap_deactivate(l);
		prevlwp = cpu_switchto(l, newl);
		sched_switch_unlock(prevlwp, l);
		pmap_activate(l);
		retval = 1;
	} else {
		sched_switch_unlock(l, l);
		retval = 0;
	}

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC);

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_restore_context(l->l_proc);
	}
#endif

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	SYSCALL_TIME_WAKEUP(l);
	KDASSERT(l->l_cpu == curcpu());

	(void)splsched();
	splx(oldspl);
	return retval;
}
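
/*
 * Illustrative caller sketch (not part of this file): the core of a
 * voluntary switch, as performed by yield() above.  The LWP must be
 * locked before calling mi_switch(); the lock is released as part of
 * the switch.
 *
 *	lwp_lock(l);
 *	l->l_nvcsw++;
 *	(void)mi_switch(l, NULL);
 */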

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	sigset_t *ss;

	KASSERT((l->l_flag & LW_IDLE) == 0);
	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT(lwp_locked(l, NULL));

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached
		 * us while we were stopped), check for a signal from the
		 * debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
				ss = &l->l_sigpend.sp_set;
			else
				ss = &p->p_sigpend.sp_set;
			sigaddset(ss, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~LW_WSUSPEND;
		p->p_nrlwps++;
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l);
		return;
	}

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 *
	 * XXXSMP Will need to change for preemption.
	 */
#ifdef MULTIPROCESSOR
	if (l->l_cpu->ci_curlwp == l) {
#else
	if (l == curlwp) {
#endif
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Set the LWP runnable.  If it's swapped out, we need to wake the
	 * swapper to bring it back in.  Otherwise, enter it into a run queue.
	 */
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	if (l->l_flag & LW_INMEM) {
		sched_enqueue(l);
		resched_cpu(l);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		uvm_kick_scheduler();
	}
}
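
/*
 * Illustrative caller sketch (not part of this file): waking an LWP
 * under the locking protocol described above.  Both the process and
 * the LWP are locked on entry; setrunnable() releases the LWP lock,
 * and the caller drops p_smutex afterwards.
 *
 *	mutex_enter(&p->p_smutex);
 *	lwp_lock(l);
 *	setrunnable(l);
 *	mutex_exit(&p->p_smutex);
 */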

/*
 * suspendsched:
 *
 *	Convert all non-PK_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
#ifdef MULTIPROCESSOR
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
#endif
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & PK_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set LW_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user/kernel
			 * boundary, so that they will release any locks
			 * that they hold.
			 */
			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & LW_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_mutex);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	sched_lock(0);
#ifdef MULTIPROCESSOR
	for (CPU_INFO_FOREACH(cii, ci))
		cpu_need_resched(ci, 0);
#else
	cpu_need_resched(curcpu(), 0);
#endif
	sched_unlock(0);
}

/*
 * sched_kpri:
 *
 *	Scale a priority level to a kernel priority level, usually
 *	for an LWP that is about to sleep.
 */
pri_t
sched_kpri(struct lwp *l)
{
	/*
	 * Scale user priorities (127 -> 50) up to kernel priorities
	 * in the range (49 -> 8).  Reserve the top 8 kernel priorities
	 * for high priority kthreads.  Kernel priorities passed in
	 * are left "as is".  XXX This is somewhat arbitrary.
	 */
	static const uint8_t kpri_tab[] = {
		 0,  1,  2,  3,  4,  5,  6,  7,
		 8,  9, 10, 11, 12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31,
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49,  8,  8,  9,  9, 10, 10,
		11, 11, 12, 12, 13, 14, 14, 15,
		15, 16, 16, 17, 17, 18, 18, 19,
		20, 20, 21, 21, 22, 22, 23, 23,
		24, 24, 25, 26, 26, 27, 27, 28,
		28, 29, 29, 30, 30, 31, 32, 32,
		33, 33, 34, 34, 35, 35, 36, 36,
		37, 38, 38, 39, 39, 40, 40, 41,
		41, 42, 42, 43, 44, 44, 45, 45,
		46, 46, 47, 47, 48, 48, 49, 49,
	};

	return (pri_t)kpri_tab[l->l_usrpri];
}
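
/*
 * Worked example (illustrative): a user priority of 100 maps to
 * kpri_tab[100] == 35, while a kernel priority of 20 passed in is
 * returned unchanged, since kpri_tab[20] == 20.
 */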

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
static void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

inline void
resched_cpu(struct lwp *l)
{
	struct cpu_info *ci;
	const pri_t pri = lwp_eprio(l);

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci, 0);
}

static void
sched_changepri(struct lwp *l, pri_t pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	l->l_usrpri = pri;
	if (l->l_priority < PUSER)
		return;

	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
		l->l_priority = pri;
		return;
	}

	sched_dequeue(l);
	l->l_priority = pri;
	sched_enqueue(l);
	resched_cpu(l);
}

static void
sched_lendpri(struct lwp *l, pri_t pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
		l->l_inheritedprio = pri;
		return;
	}

	sched_dequeue(l);
	l->l_inheritedprio = pri;
	sched_enqueue(l);
	resched_cpu(l);
}

struct lwp *
syncobj_noowner(wchan_t wchan)
{

	return NULL;
}