/*	$NetBSD: kern_synch.c,v 1.177.2.13 2007/03/09 15:16:25 rmind Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.177.2.13 2007/03/09 15:16:25 rmind Exp $");

#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/cpu.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall_stats.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>

#include <uvm/uvm_extern.h>

int	lbolt;			/* once a second sleep address */

/*
 * The global scheduler state.
 */
kmutex_t	sched_mutex;	/* global sched state mutex */

static void	sched_unsleep(struct lwp *);
static void	sched_changepri(struct lwp *, pri_t);
static void	sched_lendpri(struct lwp *, pri_t);

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri,
	sched_lendpri,
	syncobj_noowner,
};

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;
/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping; otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning to the caller unless the PNORELOCK flag is
 * specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error, catch;

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);

	if (interlock != NULL) {
		LOCK_ASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	catch = priority & PCATCH;
	sleepq_block(sq, priority & PRIMASK, ident, wmesg, timo, catch,
	    &sleep_syncobj);
	error = sleepq_unblock(timo, catch);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}
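
/*
 * Illustrative sketch (not part of this file): a typical caller sleeps on
 * a condition while holding a simple_lock interlock.  The "sc" softc, its
 * sc_slock and sc_busy members, and the "exwait" wmesg are assumed names
 * used only for illustration:
 *
 *	simple_lock(&sc->sc_slock);
 *	while (sc->sc_busy) {
 *		error = ltsleep(&sc->sc_busy, PRIBIO | PCATCH, "exwait",
 *		    0, &sc->sc_slock);
 *		if (error != 0)
 *			break;
 *	}
 *	simple_unlock(&sc->sc_slock);
 *
 * Because PNORELOCK is not passed, the interlock is held again on return,
 * so the loop re-tests sc_busy safely.  The matching waker clears sc_busy
 * and calls wakeup(&sc->sc_busy); see the sketch after wakeup_one() below.
 */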

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_block(sq, sched_kpri(l), l, wmesg, timo, intr, &sleep_syncobj);
	error = sleepq_unblock(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
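
/*
 * Illustrative sketch (not part of this file): kpause() is the simple
 * "sleep for a while" primitive.  For example, a one-second pause that is
 * not interruptible by signals and temporarily drops an assumed
 * caller-held mutex sc_lock (both "sc" and "expause" are illustrative
 * names):
 *
 *	(void)kpause("expause", false, hz, &sc->sc_lock);
 *
 * Passing NULL for the mutex argument pauses without dropping any lock,
 * and intr == true allows the pause to be cut short by a signal.
 */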

/*
 * OBSOLETE INTERFACE
 *
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}
/*
 * OBSOLETE INTERFACE
 *
 * Make only the highest-priority process sleeping on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}
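
/*
 * Illustrative sketch (not part of this file): the waker side of the
 * ltsleep() example above.  The softc members are assumed names:
 *
 *	simple_lock(&sc->sc_slock);
 *	sc->sc_busy = 0;
 *	wakeup(&sc->sc_busy);
 *	simple_unlock(&sc->sc_slock);
 *
 * wakeup() makes every LWP sleeping on &sc->sc_busy runnable; wakeup_one()
 * could be used instead when at most one waiter can make progress at a
 * time.
 */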

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat
 * code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nvcsw++;
	mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}
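
/*
 * Illustrative sketch (not part of this file): a compat syscall handler
 * servicing sched_yield(2) reduces to a single call to yield().  The
 * handler name and signature below are assumed, for illustration only:
 *
 *	int
 *	example_sys_sched_yield(struct lwp *l, void *v, register_t *retval)
 *	{
 *
 *		yield();
 *		return 0;
 *	}
 */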

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	if (l->l_stat == LSONPROC) {
		KASSERT(lwp_locked(l, &sched_mutex));
		l->l_priority = l->l_usrpri;
	}
	l->l_nivcsw++;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * sched_switch_unlock: update 'curlwp' and release old lwp.
 */

void
sched_switch_unlock(struct lwp *old, struct lwp *new)
{

	KASSERT(old == NULL || old == curlwp);
	KASSERT(new != NULL);

	if (old != NULL) {
		LOCKDEBUG_BARRIER(old->l_mutex, 1);
	} else {
		LOCKDEBUG_BARRIER(NULL, 1);
	}

	curlwp = new;
	if (old != NULL) {
		lwp_unlock(old);
	}
	spl0();
}

/*
 * Compute the amount of time during which the current lwp was running.
 *
 * - update l_rtime unless it's an idle lwp.
 * - update spc_runtime for the next lwp.
 */

static inline void
updatertime(struct lwp *l, struct schedstate_percpu *spc)
{
	struct timeval tv;
	long s, u;

	if ((l->l_flag & LW_IDLE) != 0) {
		microtime(&spc->spc_runtime);
		return;
	}

	microtime(&tv);
	u = l->l_rtime.tv_usec + (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = l->l_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;

	spc->spc_runtime = tv;
}
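
/*
 * Worked example for updatertime(), with illustrative values: given
 * l_rtime = { 5, 100000 }, spc_runtime = { 100, 900000 } and microtime()
 * returning { 101, 200000 }, the LWP ran for 0.3s since it was switched
 * in.  The raw sums are u = 100000 + (200000 - 900000) = -600000 and
 * s = 5 + (101 - 100) = 6; the borrow step then gives u = 400000, s = 5,
 * i.e. l_rtime = { 5, 400000 }, the previous 5.1s plus the 0.3s just
 * accumulated.
 */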

/*
 * The machine independent parts of context switch.  Let sched_switch()
 * choose the next LWP to run, falling back to this CPU's idle LWP if
 * nothing else is runnable, and switch to it.
 *
 * Returns 1 if another LWP was actually run.
 */
int
mi_switch(struct lwp *l)
{
	struct schedstate_percpu *spc;
	struct lwp *newl;
	int retval, oldspl;

	LOCK_ASSERT(lwp_locked(l, NULL));

#ifdef LOCKDEBUG
	spinlock_switchcheck();
	simple_lock_switchcheck();
#endif
#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * It's safe to read the per CPU schedstate unlocked here, as all we
	 * are after is the run time and that's guaranteed to have been last
	 * updated by this CPU.
	 */
	KDASSERT(l->l_cpu == curcpu());
	spc = &l->l_cpu->ci_schedstate;

	/* Count time spent in current system call */
	SYSCALL_TIME_SLEEP(l);

	/*
	 * XXXSMP If we are using h/w performance counters, save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_save_context(l->l_proc);
	}
#endif

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

	LOCKDEBUG_BARRIER(l->l_mutex, 1);

	/*
	 * Switch to the new LWP if necessary.
	 * When we run again, we'll return here.
	 */
	oldspl = MUTEX_SPIN_OLDSPL(l->l_cpu);

	/*
	 * Acquire the sched_mutex if necessary.
	 */
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_enter(&sched_mutex);
	}
#endif

	/* Please note that sched_switch() will enqueue the LWP */
	newl = sched_switch(l);
	if (newl == NULL) {
		newl = l->l_cpu->ci_data.cpu_idlelwp;
		KASSERT(newl != NULL);
	} else {
		sched_dequeue(newl);
	}
	KASSERT(lwp_locked(newl, &sched_mutex));
	newl->l_stat = LSONPROC;
	newl->l_cpu = l->l_cpu;

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	if (l->l_mutex != &sched_mutex) {
		mutex_exit(&sched_mutex);
	}
#endif

	updatertime(l, spc);
	if (l != newl) {
		struct lwp *prevlwp;

		uvmexp.swtch++;
		pmap_deactivate(l);
		prevlwp = cpu_switchto(l, newl);
		sched_switch_unlock(prevlwp, l);
		pmap_activate(l);
		retval = 1;
	} else {
		sched_switch_unlock(l, l);
		retval = 0;
	}

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC);

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_restore_context(l->l_proc);
	}
#endif

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	SYSCALL_TIME_WAKEUP(l);
	KDASSERT(l->l_cpu == curcpu());

	(void)splsched();
	splx(oldspl);
	return retval;
}

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	sigset_t *ss;

	KASSERT((l->l_flag & LW_IDLE) == 0);
	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT(lwp_locked(l, NULL));

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
				ss = &l->l_sigpend.sp_set;
			else
				ss = &p->p_sigpend.sp_set;
			sigaddset(ss, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~LW_WSUSPEND;
		p->p_nrlwps++;
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l);
		return;
	}

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 *
	 * XXXSMP Will need to change for preemption.
	 */
#ifdef MULTIPROCESSOR
	if (l->l_cpu->ci_curlwp == l) {
#else
	if (l == curlwp) {
#endif
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Set the LWP runnable.  If it's swapped out, we need to wake the
	 * swapper to bring it back in.  Otherwise, enter it into a run queue.
	 */
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	if (l->l_flag & LW_INMEM) {
		sched_enqueue(l);
		resched_cpu(l);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		uvm_kick_scheduler();
	}
}

/*
 * suspendsched:
 *
 *	Convert all non-PK_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
#ifdef MULTIPROCESSOR
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
#endif
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & PK_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set LW_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user / kernel
			 * boundary, so that they will release any locks
			 * that they hold.
			 */
			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & LW_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_mutex);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	sched_lock(0);
#ifdef MULTIPROCESSOR
	for (CPU_INFO_FOREACH(cii, ci))
		cpu_need_resched(ci, 0);
#else
	cpu_need_resched(curcpu(), 0);
#endif
	sched_unlock(0);
}

/*
 * sched_kpri:
 *
 *	Scale a priority level to a kernel priority level, usually
 *	for an LWP that is about to sleep.
 */
pri_t
sched_kpri(struct lwp *l)
{
	/*
	 * Scale user priorities (127 -> 50) up to kernel priorities
	 * in the range (49 -> 8).  Reserve the top 8 kernel priorities
	 * for high priority kthreads.  Kernel priorities passed in
	 * are left "as is".  XXX This is somewhat arbitrary.
	 */
	static const uint8_t kpri_tab[] = {
		 0,  1,  2,  3,  4,  5,  6,  7,
		 8,  9, 10, 11, 12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31,
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49,  8,  8,  9,  9, 10, 10,
		11, 11, 12, 12, 13, 14, 14, 15,
		15, 16, 16, 17, 17, 18, 18, 19,
		20, 20, 21, 21, 22, 22, 23, 23,
		24, 24, 25, 26, 26, 27, 27, 28,
		28, 29, 29, 30, 30, 31, 32, 32,
		33, 33, 34, 34, 35, 35, 36, 36,
		37, 38, 38, 39, 39, 40, 40, 41,
		41, 42, 42, 43, 44, 44, 45, 45,
		46, 46, 47, 47, 48, 48, 49, 49,
	};

	return (pri_t)kpri_tab[l->l_usrpri];
}
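
/*
 * Worked examples from kpri_tab above: kpri_tab[20] == 20 and
 * kpri_tab[49] == 49, i.e. kernel priorities pass through unchanged,
 * while kpri_tab[50] == 8 and kpri_tab[127] == 49, i.e. the user range
 * 50..127 is compressed into the kernel range 8..49.
 */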

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
static void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

inline void
resched_cpu(struct lwp *l)
{
	struct cpu_info *ci;
	const pri_t pri = lwp_eprio(l);

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci, 0);
}

static void
sched_changepri(struct lwp *l, pri_t pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	l->l_usrpri = pri;
	if (l->l_priority < PUSER)
		return;

	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
		l->l_priority = pri;
		return;
	}

	sched_dequeue(l);
	l->l_priority = pri;
	sched_enqueue(l);
	resched_cpu(l);
}

static void
sched_lendpri(struct lwp *l, pri_t pri)
{

	LOCK_ASSERT(lwp_locked(l, &sched_mutex));

	if (l->l_stat != LSRUN || (l->l_flag & LW_INMEM) == 0) {
		l->l_inheritedprio = pri;
		return;
	}

	sched_dequeue(l);
	l->l_inheritedprio = pri;
	sched_enqueue(l);
	resched_cpu(l);
}

struct lwp *
syncobj_noowner(wchan_t wchan)
{

	return NULL;
}