/src/sys/kern/
kern_idle.c
    48 struct schedstate_percpu *spc;  local in function:idle_loop
    54 spc = &ci->ci_schedstate;
    55 KASSERT(lwp_locked(l, spc->spc_lwplock));
    59 spc->spc_flags |= SPCF_RUNNING;
    83 if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
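The kern_idle.c hits above, like most sys/kern entries below, follow one pattern: take a struct cpu_info, point spc at its embedded ci_schedstate, then set or test SPCF_* bits in spc_flags. A minimal runnable sketch of that pattern, using stand-in types and flag values rather than the real sys/sched.h definitions:

    #include <stdio.h>

    #define SPCF_RUNNING    0x0001          /* stand-in: CPU runs LWPs */
    #define SPCF_OFFLINE    0x0004          /* stand-in: CPU is offline */

    struct schedstate_percpu {
            unsigned int spc_flags;
    };

    struct cpu_info {
            struct schedstate_percpu ci_schedstate;
    };

    int
    main(void)
    {
            struct cpu_info cpu = { .ci_schedstate = { .spc_flags = 0 } };
            struct schedstate_percpu *spc = &cpu.ci_schedstate;

            spc->spc_flags |= SPCF_RUNNING;         /* as in idle_loop() */
            if ((spc->spc_flags & SPCF_OFFLINE) == 0)
                    printf("online, spc_flags=%#x\n", spc->spc_flags);
            return 0;
    }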
subr_interrupt.c
    66 struct schedstate_percpu *spc;  local in function:interrupt_shield_xcall
    71 spc = &ci->ci_schedstate;
    75 spc->spc_flags &= ~SPCF_NOINTR;
    77 spc->spc_flags |= SPCF_NOINTR;
    88 struct schedstate_percpu *spc;  local in function:interrupt_shield
    96 spc = &ci->ci_schedstate;
    98 if ((spc->spc_flags & SPCF_NOINTR) == 0)
    101 if ((spc->spc_flags & SPCF_NOINTR) != 0)
    114 spc->spc_lastmod = time_second;
kern_clock.c
    414 struct schedstate_percpu *spc = &ci->ci_schedstate;  local in function:statclock
    425 if (spc->spc_psdiv != psdiv) {
    426 spc->spc_psdiv = psdiv;
    427 spc->spc_pscnt = psdiv;
    449 if (--spc->spc_pscnt > 0) {
    460 spc->spc_cp_time[CP_NICE]++;
    462 spc->spc_cp_time[CP_USER]++;
    489 if (--spc->spc_pscnt > 0) {
    510 spc->spc_cp_time[CP_INTR]++;
    513 spc->spc_cp_time[CP_SYS]++ [all...]
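The statclock() hits show the profiling-clock divider: spc_pscnt counts ticks down, is reloaded from spc_psdiv, and only the tick that reaches zero is charged to a spc_cp_time[] bucket. A simplified stand-in sketch of that accounting step:

    enum { CP_USER, CP_NICE, CP_SYS, CP_INTR, CP_IDLE, CPUSTATES };

    struct pcpu_stat {
            int pscnt;                      /* ticks until we account */
            int psdiv;                      /* current divisor */
            unsigned long cp_time[CPUSTATES];
    };

    static void
    stat_tick(struct pcpu_stat *s, int bucket)
    {
            if (--s->pscnt > 0)             /* not an accounting tick */
                    return;
            s->pscnt = s->psdiv;            /* reload the countdown */
            s->cp_time[bucket]++;           /* charge one whole period */
    }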
kern_sleepq.c
    122 struct schedstate_percpu *spc;  local in function:sleepq_remove
    140 spc = &ci->ci_schedstate;
    148 lwp_setlock(l, spc->spc_lwplock);
    159 lwp_setlock(l, spc->spc_lwplock);
    170 spc = &ci->ci_schedstate;
    176 lwp_setlock(l, spc->spc_mutex);
    182 /* LWP & SPC now unlocked, but we still hold sleep queue lock. */
sched_m2.c
    280 struct schedstate_percpu *spc = &l->l_cpu->ci_schedstate;  local in function:sched_oncpu
    285 spc->spc_ticks = l->l_sched.timeslice;
    298 struct schedstate_percpu *spc = &ci->ci_schedstate;  local in function:sched_tick
    306 KASSERT(l->l_mutex != spc->spc_mutex);
    314 spc->spc_ticks = l->l_sched.timeslice;
    339 if (lwp_eprio(l) <= spc->spc_maxpriority || l->l_target_cpu) {
    340 spc->spc_flags |= SPCF_SHOULDYIELD;
    343 /* spc now unlocked */
    345 spc->spc_ticks = l->l_sched.timeslice;
kern_cpu.c
    312 struct schedstate_percpu *spc, *mspc = NULL;  local in function:cpu_xc_offline
    322 spc = &ci->ci_schedstate;
    324 spc->spc_flags |= SPCF_OFFLINE;
    382 spc->spc_flags &= ~SPCF_OFFLINE;
    389 struct schedstate_percpu *spc;  local in function:cpu_xc_online
    394 spc = &ci->ci_schedstate;
    396 spc->spc_flags &= ~SPCF_OFFLINE;
    403 struct schedstate_percpu *spc;  local in function:cpu_setstate
    410 spc = &ci->ci_schedstate;
    415 if ((spc->spc_flags & SPCF_OFFLINE) == 0
    495 struct schedstate_percpu *spc;  local in function:cpu_xc_intr
    507 struct schedstate_percpu *spc;  local in function:cpu_xc_nointr
    519 struct schedstate_percpu *spc;  local in function:cpu_setintr
    [all...]
sched_4bsd.c
    104 struct schedstate_percpu *spc = &ci->ci_schedstate;  local in function:sched_tick
    108 spc->spc_ticks = sched_rrticks;
    113 /* spc now unlocked */
    124 KASSERT(l->l_mutex != spc->spc_mutex);
    134 if (spc->spc_flags & SPCF_SHOULDYIELD) {
    141 } else if (spc->spc_flags & SPCF_SEENRR) {
    148 spc->spc_flags |= SPCF_SHOULDYIELD;
    156 spc->spc_flags |= SPCF_SHOULDYIELD;
    158 spc->spc_flags |= SPCF_SEENRR;
    166 /* spc now unlocked * [all...]
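sched_tick() ages a running LWP toward preemption with two flags: the first tick that sees it sets SPCF_SEENRR, the next escalates to SPCF_SHOULDYIELD. A hedged sketch of that two-step round robin (flag values are stand-ins, and the real function branches on priority class around this logic):

    #define SPCF_SEENRR             0x0002  /* stand-in flag values */
    #define SPCF_SHOULDYIELD        0x0004

    static void
    rr_tick(unsigned int *flags)
    {
            if (*flags & SPCF_SHOULDYIELD) {
                    /* already due to yield; nothing more to note */
            } else if (*flags & SPCF_SEENRR) {
                    *flags |= SPCF_SHOULDYIELD;     /* second tick: time up */
            } else {
                    *flags |= SPCF_SEENRR;          /* first tick: start aging */
            }
    }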
sys_pset.c
    222 struct schedstate_percpu *spc;  local in function:kern_pset_destroy
    224 spc = &ci->ci_schedstate;
    225 if (spc->spc_psid != psid)
    227 spc->spc_psid = PS_NONE;
    306 struct schedstate_percpu *spc = NULL;  local in function:sys_pset_assign
    325 spc = ispc;
    338 opsid = spc->spc_psid;
    350 if (spc->spc_psid == psid)
    356 if (psid != PS_NONE && ((spc->spc_flags & SPCF_OFFLINE) ||
    357 (nnone == 1 && spc->spc_psid == PS_NONE)) [all...]
kern_runq.c
    138 struct schedstate_percpu *spc;  local in function:sched_cpuattach
    143 spc = &ci->ci_schedstate;
    144 spc->spc_nextpkg = ci;
    146 if (spc->spc_lwplock == NULL) {
    147 spc->spc_lwplock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
    151 lwp0.l_mutex = spc->spc_lwplock;
    153 if (spc->spc_mutex != NULL) {
    159 size = roundup2(sizeof(spc->spc_queue[0]) * PRI_COUNT, coherency_unit) +
    162 spc->spc_queue = (void *)roundup2((uintptr_t)p, coherency_unit);
    165 spc->spc_mutex = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED)
    188 struct schedstate_percpu *spc;  local in function:sched_enqueue
    262 struct schedstate_percpu *spc;  local in function:sched_dequeue
    322 struct schedstate_percpu *spc;  local in function:sched_resched_cpu
    455 const struct schedstate_percpu *spc = &ci->ci_schedstate;  local in function:sched_migratable
    476 struct schedstate_percpu *spc = &curcpu()->ci_schedstate;  local in function:sched_nextpkg
    569 struct schedstate_percpu *spc;  local in function:sched_takecpu
    655 struct schedstate_percpu *spc, *curspc;  local in function:sched_catchlwp
    717 struct schedstate_percpu *spc, *tspc;  local in function:sched_idle_migrate
    795 struct schedstate_percpu *spc, *tspc;  local in function:sched_steal
    819 struct schedstate_percpu *spc, *tspc;  local in function:sched_idle
    1076 struct schedstate_percpu *spc;  local in function:sched_nextlwp
    1113 const struct schedstate_percpu *spc;  local in function:sched_curcpu_runnable_p
    1188 struct schedstate_percpu *spc;  local in function:sched_print_runqueue
    [all...]
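The sched_cpuattach() hits carve spc_queue out of an over-sized allocation so the run queue starts on a cache-line (coherency_unit) boundary, using the power-of-two round-up macro roundup2() from <sys/param.h>. A sketch of the same alignment trick; alloc_aligned_queue() is a hypothetical helper, and a real caller would also keep the raw pointer to free the block later:

    #include <stdint.h>
    #include <stdlib.h>

    /* the usual power-of-two round-up; m must be a power of two */
    #define roundup2(x, m)  (((x) + (m) - 1) & ~((m) - 1))

    void *
    alloc_aligned_queue(size_t entry_size, size_t count, size_t coherency_unit)
    {
            size_t size;
            char *p;

            /* over-allocate so the queue can be pushed up to alignment */
            size = roundup2(entry_size * count, coherency_unit) + coherency_unit;
            if ((p = malloc(size)) == NULL)
                    return NULL;
            return (void *)roundup2((uintptr_t)p, coherency_unit);
    }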
kern_synch.c
    549 nextlwp(struct cpu_info *ci, struct schedstate_percpu *spc)
    565 KASSERT(lwp_locked(newl, spc->spc_mutex));
    570 spc->spc_curpriority = lwp_eprio(newl);
    571 spc->spc_flags &= ~(SPCF_SWITCHCLEAR | SPCF_IDLE);
    572 lwp_setlock(newl, spc->spc_lwplock);
    580 spc->spc_curpriority = PRI_IDLE;
    581 spc->spc_flags = (spc->spc_flags & ~SPCF_SWITCHCLEAR) |
    611 struct schedstate_percpu *spc;  local in function:mi_switch
    631 spc = &ci->ci_schedstate
    1057 struct schedstate_percpu *spc;  local in function:sched_changepri
    1086 struct schedstate_percpu *spc;  local in function:sched_lendpri
    [all...]
/src/sys/dev/pcmcia/ |
spc_pcmcia.c
    106 struct spc_softc *spc = &sc->sc_spc;  local in function:spc_pcmcia_attach
    112 spc->sc_dev = self;
    122 spc->sc_iot = cfe->iospace[0].handle.iot;
    123 spc->sc_ioh = cfe->iospace[0].handle.ioh;
    129 spc->sc_initiator = 7; /* XXX */
    130 spc->sc_adapter.adapt_enable = spc_pcmcia_enable;
    131 spc->sc_adapter.adapt_refcnt = 1;
    136 spc_attach(spc);
    137 scsipi_adapter_delref(&spc->sc_adapter);
/src/sys/arch/i386/stand/lib/ |
biosdisk_ll.c
    213 int cyl, head, sec, nsec, spc, dblk32;  local in function:do_read
    216 spc = d->head * d->sec;
    217 cyl = dblk32 / spc;
    218 head = (dblk32 % spc) / d->sec;
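do_read() converts a 32-bit linear block number to BIOS cylinder/head/sector form, with spc (sectors per cylinder) = heads * sectors-per-track. A runnable sketch using a made-up 16-head, 63-sector geometry:

    #include <stdio.h>

    int
    main(void)
    {
            int heads = 16, sectors = 63;           /* hypothetical geometry */
            int dblk32 = 123456;                    /* linear block number */
            int spc = heads * sectors;              /* sectors per cylinder */

            int cyl  = dblk32 / spc;                /* which cylinder */
            int head = (dblk32 % spc) / sectors;    /* which head within it */
            int sec  = dblk32 % sectors;            /* which sector on track */

            printf("LBA %d -> C/H/S %d/%d/%d\n", dblk32, cyl, head, sec);
            return 0;
    }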
/src/sys/dev/pci/ |
sti_pci.c
    98 struct sti_pci_softc *spc = device_private(self);  local in function:sti_pci_attach
    102 spc->sc_dev = self;
    104 spc->sc_pc = paa->pa_pc;
    105 spc->sc_tag = paa->pa_tag;
    106 spc->sc_base.sc_dev = self;
    107 spc->sc_base.sc_enable_rom = sti_pci_enable_rom;
    108 spc->sc_base.sc_disable_rom = sti_pci_disable_rom;
    112 if (sti_check_rom(spc, paa) != 0)
    116 ret = sti_pci_is_console(paa, spc->sc_base.bases);
    118 spc->sc_base.sc_flags |= STI_CONSOLE
    129 struct sti_pci_softc *spc = device_private(dev);  local in function:sti_pci_end_attach
    396 struct sti_pci_softc *spc = device_private(sc->sc_dev);  local in function:sti_pci_enable_rom
    422 struct sti_pci_softc *spc = device_private(sc->sc_dev);  local in function:sti_pci_disable_rom
    [all...]
/src/distrib/utils/edlabel/ |
edlabel.c
    181 int nsect, ntrack, ncyl, spc;  local in function:edit_geo
    183 nsect = ntrack = ncyl = spc = 0;
    199 spc = nsect * ntrack;
    200 if (!(ncyl = d->d_secperunit / spc))
    205 d->d_secpercyl = spc;
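edit_geo() runs the same arithmetic as biosdisk_ll.c above, in reverse: for example, entering nsect=63 and ntrack=16 gives spc = 63 * 16 = 1008 sectors per cylinder, and a unit of 4194304 sectors yields ncyl = 4194304 / 1008 = 4161 whole cylinders (integer division).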
/src/sys/arch/sh3/include/ |
cpu.h
    82 int spc; /* program counter at time of interrupt */  member in struct:clockframe
    89 #define CLKF_PC(cf) ((cf)->spc)
/src/usr.sbin/diskpart/ |
diskpart.c
    124 int spc, def, part, layout, j, ch;  local in function:main
    176 spc = dp->d_secpercyl;
    188 threshold = howmany(spc, badsecttable);
    199 dp->d_ncylinders = howmany(totsize, spc);
    200 badsecttable = spc * dp->d_ncylinders - totsize;
    212 curcyl += howmany(defpart[def][part], spc);
    231 numcyls[part] = howmany(defpart[def][part], spc);
    239 defpart[def][PART('f')] = numcyls[PART('f')] * spc - badsecttable;
    240 defpart[def][PART('g')] = numcyls[PART('g')] * spc - badsecttable;
    241 defpart[def][PART('c')] = numcyls[PART('c')] * spc;
    [all...]
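diskpart leans throughout on howmany() from <sys/param.h>, the round-up integer division macro, to turn sector counts into whole cylinders:

    #define howmany(x, y)   (((x) + ((y) - 1)) / (y))

    /* e.g. howmany(2500, 1008) == 3: 2500 sectors span 3 full cylinders */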
/src/lib/libcurses/ |
slk.c
    733 size_t spc, len, width, x;  local in function:__slk_set_finalise
    737 spc = screen->slk_label_len;
    756 if (width + w > spc)
    766 if (len > spc)
    767 len = spc;
    776 x = (spc - width) / 2;
    777 if (x + width > spc)
    781 x = spc - width;
    791 spc -= x;
    796 spc -= width [all...]
/src/sys/arch/aarch64/aarch64/ |
db_interface.c
    563 const char *spc = spaces[level];  variable in typeref:typename:const char *
    569 pr("%sL%d: pa=%lx pg=NULL\n", spc, level, pa);
    571 pr("%sL%d: pa=%lx pg=%p\n", spc, level, pa, pg);
    579 spc, level, i, n, va, pde);
/src/usr.bin/tftp/ |
tftp.c
    638 const char *spc;  local in function:tpacket
    701 spc = "";
    705 (void)printf("%s%s=%s", spc, opt, cp);
    706 spc = ", ";
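tpacket() prints a comma-separated option list with the empty-then-comma separator idiom: spc starts empty and becomes ", " after the first item, so no separator ever leads or trails. A runnable sketch (the option strings are made up):

    #include <stdio.h>

    int
    main(void)
    {
            const char *opts[] = { "blksize=1428", "tsize=0", "timeout=5" };
            const char *spc = "";

            for (int i = 0; i < 3; i++) {
                    printf("%s%s", spc, opts[i]);
                    spc = ", ";             /* from now on, prefix a comma */
            }
            printf("\n");
            return 0;
    }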
/src/sbin/newfs_msdos/ |
mkfs_msdos.c
    120 u_int8_t spc; /* sectors per cluster */  member in struct:bsbpb
    164 u_int spc; /* sectors per cluster */  member in struct:bpb
    182 { .bps = a, .spc = b, .res = c, .nft = d, .rde = e, \
    340 if (bpb.spc == 0) { /* set defaults */
    344 bpb.spc = 1; /* use 512 bytes */
    348 bpb.spc = 1; /* use 512 bytes */
    352 bpb.spc = 8; /* use 4k */
    357 while (bpb.spc < 128 && x < x1) {
    359 bpb.spc *= 2;
    424 bpb.spc = o.block_size / bpb.bps [all...]
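The default-setting hits start spc at a small power of two and double it, capped below 128. One common way to express that sizing rule is to grow the cluster size until the cluster count fits what the FAT can address; this is a rough stand-in sketch, not the exact loop condition in mkfs_msdos.c (max_clusters abstracts the bound the real code computes):

    static unsigned
    pick_spc(unsigned long long sectors, unsigned long long max_clusters)
    {
            unsigned spc = 1;               /* sectors per cluster */

            /* doubling spc halves the resulting cluster count */
            while (spc < 128 && sectors / spc > max_clusters)
                    spc *= 2;
            return spc;
    }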
/src/sys/external/bsd/gnu-efi/dist/lib/ |
print.c
    323 POOL_PRINT *spc;  local in function:_SPrint
    325 spc = Context;
    332 if (spc->len + len > spc->maxlen) {
    333 len = spc->maxlen - spc->len;
    340 CopyMem (spc->str + spc->len, Buffer, len * sizeof(CHAR16));
    341 spc->len += len;
    347 if (spc->len < spc->maxlen)
    365 POOL_PRINT *spc;  local in function:_PoolPrint
    456 POOL_PRINT spc;  local in function:UnicodeVSPrint
    530 POOL_PRINT spc;  local in function:VPoolPrint
    [all...]
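_SPrint() appends into a caller-sized POOL_PRINT buffer and clips at maxlen instead of overflowing. The same bounded-append logic in a stand-in sketch, with CHAR16 narrowed to char for brevity:

    #include <string.h>

    struct bounded_buf {
            char    *str;
            size_t   len;           /* characters used */
            size_t   maxlen;        /* capacity */
    };

    static void
    bounded_append(struct bounded_buf *spc, const char *src, size_t len)
    {
            if (spc->len + len > spc->maxlen)       /* clip to room left */
                    len = spc->maxlen - spc->len;
            memcpy(spc->str + spc->len, src, len);
            spc->len += len;
    }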
/src/sys/lib/libsa/ |
dosfs.c
    72 u_int spc; /* sectors per cluster */  member in struct:__anonc34c81eb0208
    439 if (!(fs->spc = bs->bpb.bpbSecPerClust) || fs->spc & (fs->spc - 1))
    441 fs->bsize = secbyt(fs->spc);
    537 nsec = !clus ? entsec(fs->dirents) : fs->spc;
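The check at 439 is the classic power-of-two test: x & (x - 1) clears the lowest set bit, so it is zero exactly when x has a single bit set. The mount path rejects a sectors-per-cluster value that is zero or not a power of two:

    static int
    valid_spc(unsigned spc)
    {
            /* nonzero and a power of two */
            return spc != 0 && (spc & (spc - 1)) == 0;
    }
    /* valid_spc(8) == 1, valid_spc(12) == 0, valid_spc(0) == 0 */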
/src/usr.sbin/makefs/ |
ffs.c
    322 int32_t spc, nspf, ncyl, fssize;  local in function:ffs_validate
/src/lib/librumpuser/ |
rumpuser_sp.c
    138 LIST_ENTRY(prefork) pf_spcentries; /* linked from forking spc */
    148 waitresp(struct spclient *spc, struct respwait *rw)
    153 pthread_mutex_lock(&spc->spc_mtx);
    154 sendunlockl(spc);
    155 while (!rw->rw_done && spc->spc_state != SPCSTATE_DYING) {
    156 pthread_cond_wait(&rw->rw_cv, &spc->spc_mtx);
    158 TAILQ_REMOVE(&spc->spc_respwait, rw, rw_entries);
    159 spcstate = spc->spc_state;
    160 pthread_mutex_unlock(&spc->spc_mtx);
    195 lwproc_rfork(struct spclient *spc, int flags, const char *comm
    560 struct spclient *spc = &spclist[idx];  local in function:serv_handledisco
    594 struct spclient *spc;  local in function:serv_shutdown
    785 struct spclient *spc = arg;  local in function:sp_copyin
    826 struct spclient *spc = arg;  local in function:sp_copyout
    859 struct spclient *spc = arg;  local in function:rumpuser_sp_anonmmap
    888 struct spclient *spc = arg;  local in function:rumpuser_sp_raise
    1182 struct spclient *spc;  local in function:spserver
    1375 struct spclient *spc = arg;  local in function:rumpuser_sp_fini
    [all...]
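waitresp() is a textbook condition-variable wait: take the mutex, re-check the predicates around every pthread_cond_wait() (which atomically drops and reacquires the mutex), and snapshot shared state before unlocking. A self-contained sketch with simplified stand-in fields:

    #include <pthread.h>
    #include <stdbool.h>

    struct waiter {
            pthread_mutex_t mtx;
            pthread_cond_t  cv;
            bool            done;   /* predicate: response arrived */
            bool            dying;  /* predicate: connection going away */
    };

    static bool
    wait_response(struct waiter *w)
    {
            bool dying;

            pthread_mutex_lock(&w->mtx);
            while (!w->done && !w->dying)
                    pthread_cond_wait(&w->cv, &w->mtx);     /* releases mtx */
            dying = w->dying;               /* snapshot under the lock */
            pthread_mutex_unlock(&w->mtx);
            return !dying;                  /* true if a response came */
    }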
/src/usr.sbin/sunlabel/ |
sunlabel.c
    98 * ncyl=pcyl=ceil(device size/spc) and acyl=apc=0.
    124 uint32_t spc; /* Sectors per cylinder - nhead*nsect */  member in struct:label
    181 * update_spc is a `changed' function for updating the spc value when
    353 * centralize the check. (If spc is zero, cylinder numbers make
    357 * whenever we change spc.
    362 if (label.spc == 0) {
    365 p->endcyl = p->startcyl + how_many(p->nblk, label.spc);
    412 label.spc = 0;
    453 label.spc = label.nhead * label.nsect;
    716 * If label.spc is nonzero but the partition size is not a multiple o [all...]