Home | Sort by: relevance | last modified time | path
    Searched refs:cpu_switchto (Results 1 - 22 of 22) sorted by relevance

  /src/sys/sys/
cpu.h 79 struct lwp *cpu_switchto(struct lwp *, struct lwp *, bool);
  /src/sys/arch/arm/arm32/
cpuswitch.S 126 * cpu_switchto(struct lwp *current, struct lwp *next)
135 ENTRY(cpu_switchto) function
305 /* cpu_switchto returns the old lwp */
315 * Pull the registers that got pushed when cpu_switchto() was called,
320 END(cpu_switchto)
324 * cpu_switchto gives us:
417 dmb /* for mutex_enter; see cpu_switchto */
460 dmb /* for mutex_enter; see cpu_switchto */
464 dmb /* for mutex_enter; see cpu_switchto */
  /src/sys/arch/ia64/ia64/
vm_machdep.c 67 * The cpu_switchto() function saves the context of the LWP which is
69 * specified by newlwp. man cpu_switchto(9)
72 cpu_switchto(lwp_t *oldlwp, lwp_t *newlwp, bool returning) function in typeref:typename:lwp_t *
116 /* return oldlwp for the original thread that called cpu_switchto */
  /src/sys/arch/vax/include/
macros.h 336 cpu_switchto(struct lwp *oldlwp, struct lwp *newlwp, bool returning) function in typeref:struct:lwp *
  /src/sys/arch/aarch64/aarch64/
cpuswitch.S 52 ENTRY_NP(cpu_switchto)
165 END(cpu_switchto)
187 adr x2, softint_cleanup /* return address for cpu_switchto() */
225 dmb ishst /* for mutex_enter; see cpu_switchto */
275 dmb ishst /* for mutex_enter; see cpu_switchto */
277 dmb ish /* for mutex_enter; see cpu_switchto */
307 * cpu_switchto() bottom half arranges to start this when softlwp.
329 * x0 = old lwp (from cpu_switchto)
330 * x1 = new lwp (from cpu_switchto)
  /src/sys/arch/m68k/m68k/
switch_subr.s 86 * struct lwp *cpu_switchto(struct lwp *oldlwp, struct lwp *newlwp)
90 ENTRY(cpu_switchto) function
357 * %a0 will have old lwp from cpu_switchto(), and %a4 is new lwp
  /src/sys/arch/riscv/riscv/
cpu_switch.S 39 * cpu_switchto(struct lwp *oldl, struct lwp *newl, bool returning);
41 ENTRY_NP(cpu_switchto)
114 END(cpu_switchto)
118 * a0 = old lwp (from cpu_switchto)
119 * a1 = new lwp (from cpu_switchto)
182 fence w,w // for mutex_enter; see cpu_switchto
199 fence w,w // for mutex_enter; see cpu_switchto
201 fence w,r // for mutex_enter; see cpu_switchto
  /src/sys/arch/mips/mips/
db_trace.c 208 pc = (vaddr_t)cpu_switchto;
mips_stacktrace.c 188 Name(cpu_switchto),
locore.S 209 * struct lwp *cpu_switchto(struct lwp *cur, struct lwp *next)
219 NESTED(cpu_switchto, CALLFRAME_SIZ, ra)
367 END(cpu_switchto)
430 SYNC_PRODUCER /* XXX fixup */ /* for mutex_enter; see cpu_switchto */
449 SYNC_PRODUCER /* XXX fixup */ /* for mutex_enter; see cpu_switchto */
451 SYNC_DEKKER /* XXX fixup */ /* for mutex_enter; see cpu_switchto */
mipsX_subr.S 2636 # Call lwp_startup(), with args from cpu_switchto()/cpu_lwp_fork()
  /src/sys/arch/usermode/dev/
cpu.c 244 cpu_switchto(lwp_t *oldlwp, lwp_t *newlwp, bool returning) function in typeref:typename:lwp_t *
252 thunk_printf_debug("cpu_switchto [%s,pid=%d,lid=%d] -> [%s,pid=%d,lid=%d]\n",
295 thunk_printf_debug("cpu_switchto: returning %p (was %p)\n", ci->ci_stash, oldlwp);
  /src/sys/arch/hppa/hppa/
locore.S 793 * cpu_switchto(struct lwp *oldl, struct lwp *newl, bool returning)
796 ENTRY(cpu_switchto,128) function
819 .asciz "cpu_switchto: 0x%08x stack/len 0x%08x"
970 EXIT(cpu_switchto)
974 * cpu_switchto() has switched to it for the first time.
977 * arranges for cpu_switchto() to call us with a frame containing
980 * cpu_switchto() also makes sure that %arg0 and %arg1 are (still)
987 /* %arg0, %arg1 are still valid from cpu_switchto */
  /src/sys/arch/amd64/amd64/
locore.S 1313 * Mimic cpu_switchto() for postmortem debugging.
1335 * struct lwp *cpu_switchto(struct lwp *oldlwp, struct lwp *newlwp,
1345 ENTRY(cpu_switchto) function
1402 movq $_C_LABEL(cpu_switchto), CPUVAR(XEN_CLOCKF_PC)
1478 * mi_switch(), when cpu_switchto() returns. XXX Still needed? */
1549 END(cpu_switchto)
  /src/sys/arch/i386/i386/
locore.S 1462 * cpu_switchto()ing to the process, so we abuse the callee-saved
1463 * registers used by cpu_switchto() to store the information about the
1542 * Mimic cpu_switchto() for postmortem debugging.
1556 * struct lwp *cpu_switchto(struct lwp *oldlwp, struct lwp *newlwp,
1566 ENTRY(cpu_switchto) function
1622 movl $_C_LABEL(cpu_switchto), CPUVAR(XEN_CLOCKF_PC)
1726 END(cpu_switchto)
  /src/sys/arch/sh3/sh3/
locore_subr.S 82 * LINTSTUB: Func: lwp_t *cpu_switchto(lwp_t *olwp, lwp_t *nlwp, bool returning)
88 ENTRY(cpu_switchto) function
176 SET_ENTRY_SIZE(cpu_switchto)
384 * nascent lwp is selected by cpu_switchto().
391 * cpu_switchto(), so r0 contains previous lwp (the one we are
400 mov r0, r4 /* previous lwp returned by cpu_switchto */
  /src/sys/arch/powerpc/powerpc/
locore_subr.S 158 * cpu_switchto(struct lwp *current, struct lwp *new)
165 ENTRY(cpu_switchto) function
332 * cpu_switchto has restored r30/r31 for us.
370 * We need a 2nd callframe from which cpu_switchto will consume
418 sync /* XXX eieio */ /* for mutex_enter; see cpu_switchto */
462 sync /* XXX eieio */ /* for mutex_enter; see cpu_switchto */
466 sync /* for mutex_enter; see cpu_switchto */
538 * r3 (old lwp) and r4 (new lwp) are setup in cpu_switchto.
  /src/sys/arch/alpha/alpha/
locore.s 650 * since cpu_switchto will copy over the info saved here. (It _can_
651 * sanely be used for curlwp iff cpu_switchto won't be called again, e.g.
785 * struct lwp *cpu_switchto(struct lwp *current, struct lwp *next,
794 LEAF(cpu_switchto, 0)
882 END(cpu_switchto)
889 * via cpu_switchto(), which returns the LWP we switched away from in v0.
896 mov v0, a0 /* a0 = prev_lwp (from cpu_switchto()) */
  /src/sys/kern/
kern_synch.c 811 prevlwp = cpu_switchto(l, newl, returning);
  /src/share/man/man9/
Makefile 16 cpu_startup.9 cpu_switchto.9 cpufreq.9 \
  /src/sys/arch/sparc/sparc/
locore.s 4831 * cpu_switchto() runs an lwp, saving the current one away.
4833 ENTRY(cpu_switchto) function
5010 * cpu_switchto(), so %o0 contains previous lwp (the one we are
  /src/sys/arch/sparc64/sparc64/
locore.s 6679 * cpu_switchto() switches to an lwp to run and runs it, saving the
6682 * struct lwp * cpu_switchto(struct lwp *current, struct lwp *next)
6691 ENTRY(cpu_switchto) function
6848 membar #StoreStore /* for mutex_enter; see cpu_switchto */
6869 membar #StoreStore /* for mutex_enter; see cpu_switchto */
6871 membar #StoreLoad /* for mutex_enter; see cpu_switchto */
6882 * Trampoline function that gets returned to by cpu_switchto() when
6886 * o0 old lwp from cpu_switchto()
6935 * cpu_switchto(), so %o0 contains previous lwp (the one we are

Completed in 36 milliseconds