cpuvar.h revision 1.1.1.3
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2014 Igor Kozhukhov <ikozhukhov@gmail.com>.
 */

#ifndef _SYS_CPUVAR_H
#define	_SYS_CPUVAR_H

#include <sys/thread.h>
#include <sys/sysinfo.h>	/* has cpu_stat_t definition */
#include <sys/disp.h>
#include <sys/processor.h>

#include <sys/loadavg.h>
#if (defined(_KERNEL) || defined(_KMEMUSER)) && defined(_MACHDEP)
#include <sys/machcpuvar.h>
#endif

#include <sys/types.h>
#include <sys/file.h>
#include <sys/bitmap.h>
#include <sys/rwlock.h>
#include <sys/msacct.h>
#if defined(__GNUC__) && defined(_ASM_INLINES) && defined(_KERNEL) && \
	(defined(__i386) || defined(__amd64))
#include <asm/cpuvar.h>
#endif

#ifdef	__cplusplus
extern "C" {
#endif

struct squeue_set_s;

#define	CPU_CACHE_COHERENCE_SIZE	64

/*
 * For fast event tracing.
 */
struct ftrace_record;
typedef struct ftrace_data {
	int			ftd_state;	/* ftrace flags */
	kmutex_t		ftd_unused;	/* ftrace buffer lock, unused */
	struct ftrace_record	*ftd_cur;	/* current record */
	struct ftrace_record	*ftd_first;	/* first record */
	struct ftrace_record	*ftd_last;	/* last record */
} ftrace_data_t;

struct cyc_cpu;
struct nvlist;

/*
 * Per-CPU data.
 *
 * Be careful adding new members: if they are not the same in all modules (e.g.
 * change size depending on a #define), CTF uniquification can fail to work
 * properly.  Furthermore, this is transitive in that it applies recursively to
 * all types pointed to by cpu_t.
 */
typedef struct cpu {
	processorid_t	cpu_id;			/* CPU number */
	processorid_t	cpu_seqid;	/* sequential CPU id (0..ncpus-1) */
	volatile cpu_flag_t cpu_flags;		/* flags indicating CPU state */
	struct cpu	*cpu_self;		/* pointer to itself */
	kthread_t	*cpu_thread;		/* current thread */
	kthread_t	*cpu_idle_thread;	/* idle thread for this CPU */
	kthread_t	*cpu_pause_thread;	/* pause thread for this CPU */
	klwp_id_t	cpu_lwp;		/* current lwp (if any) */
	klwp_id_t	cpu_fpowner;		/* currently loaded fpu owner */
	struct cpupart	*cpu_part;		/* partition with this CPU */
	struct lgrp_ld	*cpu_lpl;		/* pointer to this cpu's load */
	int		cpu_cache_offset;	/* see kmem.c for details */

	/*
	 * Links to other CPUs.  It is safe to walk these lists if
	 * one of the following is true:
	 *	- cpu_lock held
	 *	- preemption disabled via kpreempt_disable
	 *	- PIL >= DISP_LEVEL
	 *	- acting thread is an interrupt thread
	 *	- all other CPUs are paused
	 */
	struct cpu	*cpu_next;		/* next existing CPU */
	struct cpu	*cpu_prev;		/* prev existing CPU */
	struct cpu	*cpu_next_onln;		/* next online (enabled) CPU */
	struct cpu	*cpu_prev_onln;		/* prev online (enabled) CPU */
	struct cpu	*cpu_next_part;		/* next CPU in partition */
	struct cpu	*cpu_prev_part;		/* prev CPU in partition */
	struct cpu	*cpu_next_lgrp;		/* next CPU in latency group */
	struct cpu	*cpu_prev_lgrp;		/* prev CPU in latency group */
	struct cpu	*cpu_next_lpl;		/* next CPU in lgrp partition */
	struct cpu	*cpu_prev_lpl;
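
	/*
	 * Illustrative sketch: one safe way to walk the online list, here
	 * relying on disabled preemption; do_work() is a hypothetical
	 * per-CPU action:
	 *
	 *	cpu_t *cp;
	 *
	 *	kpreempt_disable();
	 *	cp = cpu_active;
	 *	do {
	 *		do_work(cp);
	 *	} while ((cp = cp->cpu_next_onln) != cpu_active);
	 *	kpreempt_enable();
	 */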

	struct cpu_pg	*cpu_pg;		/* cpu's processor groups */

	void		*cpu_reserved[4];	/* reserved for future use */

	/*
	 * Scheduling variables.
	 */
	disp_t		*cpu_disp;		/* dispatch queue data */
	/*
	 * Note that cpu_disp is set before the CPU is added to the system
	 * and is never modified.  Hence, no additional locking is needed
	 * beyond what's necessary to access the cpu_t structure.
	 */
	char		cpu_runrun;	/* scheduling flag - set to preempt */
	char		cpu_kprunrun;		/* force kernel preemption */
	pri_t		cpu_chosen_level;	/* priority at which cpu */
						/* was chosen for scheduling */
	kthread_t	*cpu_dispthread; /* thread selected for dispatch */
	disp_lock_t	cpu_thread_lock; /* dispatcher lock on current thread */
	uint8_t		cpu_disp_flags;	/* flags used by dispatcher */
	/*
	 * The following field is updated whenever cpu_dispthread changes,
	 * and wherever the current thread's (cpu_dispthread's) priority
	 * changes.  It is used by disp_lowpri_cpu().
	 */
	pri_t		cpu_dispatch_pri; /* priority of cpu_dispthread */
	clock_t		cpu_last_swtch;	/* last time switched to new thread */

	/*
	 * Interrupt data.
	 */
	caddr_t		cpu_intr_stack;	/* interrupt stack */
	kthread_t	*cpu_intr_thread; /* interrupt thread list */
	uint_t		cpu_intr_actv;	/* interrupt levels active (bitmask) */
	int		cpu_base_spl;	/* priority for highest rupt active */

	/*
	 * Statistics.
	 */
	cpu_stats_t	cpu_stats;		/* per-CPU statistics */
	struct kstat	*cpu_info_kstat;	/* kstat for cpu info */

	uintptr_t	cpu_profile_pc;	/* kernel PC in profile interrupt */
	uintptr_t	cpu_profile_upc; /* user PC in profile interrupt */
	uintptr_t	cpu_profile_pil; /* PIL when profile interrupted */

	ftrace_data_t	cpu_ftrace;		/* per cpu ftrace data */

	clock_t		cpu_deadman_counter;	/* used by deadman() */
	uint_t		cpu_deadman_countdown;	/* used by deadman() */

	kmutex_t	cpu_cpc_ctxlock; /* protects context for idle thread */
	kcpc_ctx_t	*cpu_cpc_ctx;	/* performance counter context */

	/*
	 * Configuration information for the processor_info system call.
	 */
	processor_info_t cpu_type_info;	/* config info */
	time_t		cpu_state_begin; /* when CPU entered current state */
	char		cpu_cpr_flags;	/* CPR related info */
	struct cyc_cpu	*cpu_cyclic;	/* per cpu cyclic subsystem data */
	struct squeue_set_s *cpu_squeue_set;	/* per cpu squeue set */
	struct nvlist	*cpu_props;	/* pool-related properties */

	krwlock_t	cpu_ft_lock;		/* DTrace: fasttrap lock */
	uintptr_t	cpu_dtrace_caller;	/* DTrace: caller, if any */
	hrtime_t	cpu_dtrace_chillmark;	/* DTrace: chill mark time */
	hrtime_t	cpu_dtrace_chilled;	/* DTrace: total chill time */
	volatile uint16_t cpu_mstate;		/* cpu microstate */
	volatile uint16_t cpu_mstate_gen;	/* generation counter */
	volatile hrtime_t cpu_mstate_start;	/* cpu microstate start time */
	volatile hrtime_t cpu_acct[NCMSTATES];	/* cpu microstate data */
	hrtime_t	cpu_intracct[NCMSTATES]; /* interrupt mstate data */
	hrtime_t	cpu_waitrq;		/* cpu run-queue wait time */
	struct loadavg_s cpu_loadavg;		/* loadavg info for this cpu */

	char		*cpu_idstr;	/* for printing and debugging */
	char		*cpu_brandstr;	/* for printing */

	/*
	 * Sum of all device interrupt weights that are currently directed at
	 * this cpu. Cleared at start of interrupt redistribution.
	 */
	int32_t		cpu_intr_weight;
	void		*cpu_vm_data;

	struct cpu_physid *cpu_physid;	/* physical associations */

	uint64_t	cpu_curr_clock;		/* current clock freq in Hz */
	char		*cpu_supp_freqs;	/* supported freqs in Hz */

	uintptr_t	cpu_cpcprofile_pc;	/* kernel PC in cpc interrupt */
	uintptr_t	cpu_cpcprofile_upc;	/* user PC in cpc interrupt */

	/*
	 * Interrupt load factor used by dispatcher & softcall
	 */
	hrtime_t	cpu_intrlast;	/* total interrupt time (nsec) */
	int		cpu_intrload;	/* interrupt load factor (0-99%) */

	uint_t		cpu_rotor;	/* for cheap pseudo-random numbers */

	struct cu_cpu_info	*cpu_cu_info;	/* capacity & util. info */

	/*
	 * cpu_generation is updated whenever a CPU goes on-line or off-line.
	 * Updates to cpu_generation are protected by cpu_lock.
	 *
	 * See the CPU_NEW_GENERATION() macro below.
	 */
	volatile uint_t		cpu_generation;	/* tracking on/off-line */

	/*
	 * New members must be added /before/ this member, as the CTF tools
	 * rely on this being the last field before cpu_m, so they can
	 * correctly calculate the offset when synthetically adding the cpu_m
	 * member in objects that do not have it.  This fixup is required for
	 * uniquification to work correctly.
	 */
	uintptr_t	cpu_m_pad;

#if (defined(_KERNEL) || defined(_KMEMUSER)) && defined(_MACHDEP)
	struct machcpu	cpu_m;		/* per architecture info */
#endif
} cpu_t;

/*
 * The cpu_core structure consists of per-CPU state available in any context.
 * On some architectures, this may mean that the page(s) containing the
 * NCPU-sized array of cpu_core structures must be locked in the TLB -- it
 * is up to the platform to assure that this is performed properly.  Note that
 * the structure is sized to avoid false sharing.
 */
#define	CPUC_SIZE		(sizeof (uint16_t) + sizeof (uint8_t) + \
				sizeof (uintptr_t) + sizeof (kmutex_t))
#define	CPUC_PADSIZE		(CPU_CACHE_COHERENCE_SIZE - CPUC_SIZE)

typedef struct cpu_core {
	uint16_t	cpuc_dtrace_flags;	/* DTrace flags */
	uint8_t		cpuc_dcpc_intr_state;	/* DCPC provider intr state */
	uint8_t		cpuc_pad[CPUC_PADSIZE];	/* padding */
	uintptr_t	cpuc_dtrace_illval;	/* DTrace illegal value */
	kmutex_t	cpuc_pid_lock;		/* DTrace pid provider lock */
} cpu_core_t;

#ifdef _KERNEL
extern cpu_core_t cpu_core[];
#endif /* _KERNEL */

/*
 * CPU_ON_INTR() macro. Returns non-zero if currently on interrupt stack.
 * Note that this isn't a test for a high PIL.  For example, cpu_intr_actv
 * does not get updated when we go through sys_trap from TL>0 at high PIL.
 * getpil() should be used instead to check for PIL levels.
 */
#define	CPU_ON_INTR(cpup) ((cpup)->cpu_intr_actv >> (LOCK_LEVEL + 1))

/*
 * Check whether an interrupt thread might be active at a given ipl.
 * If so, return true.
 * We must be conservative -- it is ok to give a false yes, but a false no
 * will cause disaster.  (But if the situation changes after we check, that
 * is ok -- the caller is trying to ensure that an interrupt routine has
 * been exited.)
 * This is used when trying to remove an interrupt handler from an
 * autovector list in avintr.c.
 */
#define	INTR_ACTIVE(cpup, level)	\
	((level) <= LOCK_LEVEL ?	\
	((cpup)->cpu_intr_actv & (1 << (level))) : (CPU_ON_INTR(cpup)))
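
/*
 * Usage sketch (illustrative, not from avintr.c): waiting until no
 * interrupt thread can still be active at "ipl" on cpup before tearing
 * down a handler:
 *
 *	while (INTR_ACTIVE(cpup, ipl))
 *		;
 */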

/*
 * CPU_PSEUDO_RANDOM() returns a per-CPU value that changes each time one
 * looks at it.  It's meant as a cheap mechanism to be incorporated in
 * routines wanting to avoid biasing, but where true randomness isn't needed
 * (just something that changes).
 */
#define	CPU_PSEUDO_RANDOM() (CPU->cpu_rotor++)
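
/*
 * Example (illustrative): choosing an unbiased starting point for a scan
 * over ncpus CPUs without needing real randomness:
 *
 *	start = CPU_PSEUDO_RANDOM() % ncpus;
 */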

#if defined(_KERNEL) || defined(_KMEMUSER)

#define	INTR_STACK_SIZE	MAX(DEFAULTSTKSZ, PAGESIZE)

/* MEMBERS PROTECTED BY "atomicity": cpu_flags */

/*
 * Flags in the CPU structure.
 *
 * These are protected by cpu_lock (except during creation).
 *
 * Offlined CPUs have three stages of being offline:
 *
 * CPU_ENABLE indicates that the CPU is participating in I/O interrupts
 * that can be directed at a number of different CPUs.  If CPU_ENABLE
 * is off, the CPU will not be given interrupts that can be sent elsewhere,
 * but will still get interrupts from devices associated with that CPU only,
 * and from other CPUs.
 *
 * CPU_OFFLINE indicates that the dispatcher should not allow any threads
 * other than interrupt threads to run on that CPU.  A CPU will not have
 * CPU_OFFLINE set if there are any bound threads (besides interrupts).
 *
 * CPU_QUIESCED is set if p_offline was able to idle the CPU completely,
 * so that it will not have to run interrupt threads.  In this case it
 * will stay in the idle loop until CPU_QUIESCED is turned off.
 *
 * CPU_FROZEN is used only by CPR to mark CPUs that have been successfully
 * suspended (in the suspend path), or have yet to be resumed (in the resume
 * case).
 *
 * On some platforms CPUs can be individually powered off.
 * The following flags are set for powered off CPUs: CPU_QUIESCED,
 * CPU_OFFLINE, and CPU_POWEROFF.  The following flags are cleared:
 * CPU_RUNNING, CPU_READY, CPU_EXISTS, and CPU_ENABLE.
 */
#define	CPU_RUNNING	0x001		/* CPU running */
#define	CPU_READY	0x002		/* CPU ready for cross-calls */
#define	CPU_QUIESCED	0x004		/* CPU will stay in idle */
#define	CPU_EXISTS	0x008		/* CPU is configured */
#define	CPU_ENABLE	0x010		/* CPU enabled for interrupts */
#define	CPU_OFFLINE	0x020		/* CPU offline via p_online */
#define	CPU_POWEROFF	0x040		/* CPU is powered off */
#define	CPU_FROZEN	0x080		/* CPU is frozen via CPR suspend */
#define	CPU_SPARE	0x100		/* CPU offline available for use */
#define	CPU_FAULTED	0x200		/* CPU offline diagnosed faulty */

#define	FMT_CPU_FLAGS							\
	"\20\12fault\11spare\10frozen"					\
	"\7poweroff\6offline\5enable\4exist\3quiesced\2ready\1run"
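
/*
 * FMT_CPU_FLAGS is a "%b" bit-name string; a sketch of its use (assuming a
 * cmn_err() caller, for illustration only):
 *
 *	cmn_err(CE_CONT, "cpu%d flags = %b\n",
 *	    cp->cpu_id, cp->cpu_flags, FMT_CPU_FLAGS);
 */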

#define	CPU_ACTIVE(cpu)	(((cpu)->cpu_flags & CPU_OFFLINE) == 0)

/*
 * Flags for cpu_offline(), cpu_faulted(), and cpu_spare().
 */
#define	CPU_FORCED	0x0001		/* Force CPU offline */

/*
 * DTrace flags.
 */
#define	CPU_DTRACE_NOFAULT	0x0001	/* Don't fault */
#define	CPU_DTRACE_DROP		0x0002	/* Drop this ECB */
#define	CPU_DTRACE_BADADDR	0x0004	/* DTrace fault: bad address */
#define	CPU_DTRACE_BADALIGN	0x0008	/* DTrace fault: bad alignment */
#define	CPU_DTRACE_DIVZERO	0x0010	/* DTrace fault: divide by zero */
#define	CPU_DTRACE_ILLOP	0x0020	/* DTrace fault: illegal operation */
#define	CPU_DTRACE_NOSCRATCH	0x0040	/* DTrace fault: out of scratch */
#define	CPU_DTRACE_KPRIV	0x0080	/* DTrace fault: bad kernel access */
#define	CPU_DTRACE_UPRIV	0x0100	/* DTrace fault: bad user access */
#define	CPU_DTRACE_TUPOFLOW	0x0200	/* DTrace fault: tuple stack overflow */
#if defined(__sparc)
#define	CPU_DTRACE_FAKERESTORE	0x0400	/* pid provider hint to getreg */
#endif
#define	CPU_DTRACE_ENTRY	0x0800	/* pid provider hint to ustack() */
#define	CPU_DTRACE_BADSTACK	0x1000	/* DTrace fault: bad stack */

#define	CPU_DTRACE_FAULT	(CPU_DTRACE_BADADDR | CPU_DTRACE_BADALIGN | \
				CPU_DTRACE_DIVZERO | CPU_DTRACE_ILLOP | \
				CPU_DTRACE_NOSCRATCH | CPU_DTRACE_KPRIV | \
				CPU_DTRACE_UPRIV | CPU_DTRACE_TUPOFLOW | \
				CPU_DTRACE_BADSTACK)
#define	CPU_DTRACE_ERROR	(CPU_DTRACE_FAULT | CPU_DTRACE_DROP)

/*
 * Dispatcher flags
 * These flags must be changed only by the current CPU.
 */
#define	CPU_DISP_DONTSTEAL	0x01	/* CPU undergoing context swtch */
#define	CPU_DISP_HALTED		0x02	/* CPU halted waiting for interrupt */

#endif /* _KERNEL || _KMEMUSER */

#if (defined(_KERNEL) || defined(_KMEMUSER)) && defined(_MACHDEP)

/*
 * Macros for manipulating sets of CPUs as a bitmap.  Note that this
 * bitmap may vary in size depending on the maximum CPU id a specific
 * platform supports.  This may be different than the number of CPUs
 * the platform supports, since CPU ids can be sparse.  We define two
 * sets of macros; one for platforms where the maximum CPU id is less
 * than the number of bits in a single word (32 in a 32-bit kernel,
 * 64 in a 64-bit kernel), and one for platforms that require bitmaps
 * of more than one word.
 */

#define	CPUSET_WORDS	BT_BITOUL(NCPU)
#define	CPUSET_NOTINSET	((uint_t)-1)

#if	CPUSET_WORDS > 1

typedef struct cpuset {
	ulong_t	cpub[CPUSET_WORDS];
} cpuset_t;

/*
 * Private functions for manipulating cpusets that do not fit in a
 * single word.  These should not be used directly; instead the
 * CPUSET_* macros should be used so the code will be portable
 * across different definitions of NCPU.
 */
extern	void	cpuset_all(cpuset_t *);
extern	void	cpuset_all_but(cpuset_t *, uint_t);
extern	int	cpuset_isnull(cpuset_t *);
extern	int	cpuset_cmp(cpuset_t *, cpuset_t *);
extern	void	cpuset_only(cpuset_t *, uint_t);
extern	uint_t	cpuset_find(cpuset_t *);
extern	void	cpuset_bounds(cpuset_t *, uint_t *, uint_t *);

#define	CPUSET_ALL(set)			cpuset_all(&(set))
#define	CPUSET_ALL_BUT(set, cpu)	cpuset_all_but(&(set), cpu)
#define	CPUSET_ONLY(set, cpu)		cpuset_only(&(set), cpu)
#define	CPU_IN_SET(set, cpu)		BT_TEST((set).cpub, cpu)
#define	CPUSET_ADD(set, cpu)		BT_SET((set).cpub, cpu)
#define	CPUSET_DEL(set, cpu)		BT_CLEAR((set).cpub, cpu)
#define	CPUSET_ISNULL(set)		cpuset_isnull(&(set))
#define	CPUSET_ISEQUAL(set1, set2)	cpuset_cmp(&(set1), &(set2))

/*
 * Find one CPU in the cpuset.
 * Sets "cpu" to the id of the found CPU, or to CPUSET_NOTINSET if no CPU
 * could be found (i.e. the set is empty).
 */
#define	CPUSET_FIND(set, cpu)		{		\
	cpu = cpuset_find(&(set));			\
}

/*
 * Determine the smallest and largest CPU id in the set.  Returns
 * CPUSET_NOTINSET in smallest and largest when set is empty.
 */
#define	CPUSET_BOUNDS(set, smallest, largest)	{		\
	cpuset_bounds(&(set), &(smallest), &(largest));		\
}
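
/*
 * Usage sketch (illustrative): finding the id range covered by a set:
 *
 *	uint_t lo, hi;
 *
 *	CPUSET_BOUNDS(my_set, lo, hi);
 *	if (lo == CPUSET_NOTINSET)
 *		return;		(my_set was empty)
 */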

/*
 * Atomic cpuset operations.
 * These are safe to use for concurrent cpuset manipulations.
 * "xdel" and "xadd" are exclusive operations that set "result" to "0"
 * if the add or del was successful, or "-1" if not (e.g. attempting to
 * add a cpu to a cpuset that's already there, or deleting a cpu that's
 * not in the cpuset).
 */

#define	CPUSET_ATOMIC_DEL(set, cpu)	BT_ATOMIC_CLEAR((set).cpub, (cpu))
#define	CPUSET_ATOMIC_ADD(set, cpu)	BT_ATOMIC_SET((set).cpub, (cpu))

#define	CPUSET_ATOMIC_XADD(set, cpu, result) \
	BT_ATOMIC_SET_EXCL((set).cpub, cpu, result)

#define	CPUSET_ATOMIC_XDEL(set, cpu, result) \
	BT_ATOMIC_CLEAR_EXCL((set).cpub, cpu, result)
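
/*
 * Usage sketch (illustrative): claiming a CPU exclusively, backing off if
 * another thread got there first:
 *
 *	int result;
 *
 *	CPUSET_ATOMIC_XADD(busy_set, cp->cpu_id, result);
 *	if (result != 0)
 *		return;		(cp was already claimed)
 */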

#define	CPUSET_OR(set1, set2)		{		\
	int _i;						\
	for (_i = 0; _i < CPUSET_WORDS; _i++)		\
		(set1).cpub[_i] |= (set2).cpub[_i];	\
}

#define	CPUSET_XOR(set1, set2)		{		\
	int _i;						\
	for (_i = 0; _i < CPUSET_WORDS; _i++)		\
		(set1).cpub[_i] ^= (set2).cpub[_i];	\
}

#define	CPUSET_AND(set1, set2)		{		\
	int _i;						\
	for (_i = 0; _i < CPUSET_WORDS; _i++)		\
		(set1).cpub[_i] &= (set2).cpub[_i];	\
}

#define	CPUSET_ZERO(set)		{		\
	int _i;						\
	for (_i = 0; _i < CPUSET_WORDS; _i++)		\
		(set).cpub[_i] = 0;			\
}

#elif	CPUSET_WORDS == 1

typedef	ulong_t	cpuset_t;	/* a set of CPUs */

#define	CPUSET(cpu)			(1UL << (cpu))

#define	CPUSET_ALL(set)			((void)((set) = ~0UL))
#define	CPUSET_ALL_BUT(set, cpu)	((void)((set) = ~CPUSET(cpu)))
#define	CPUSET_ONLY(set, cpu)		((void)((set) = CPUSET(cpu)))
#define	CPU_IN_SET(set, cpu)		((set) & CPUSET(cpu))
#define	CPUSET_ADD(set, cpu)		((void)((set) |= CPUSET(cpu)))
#define	CPUSET_DEL(set, cpu)		((void)((set) &= ~CPUSET(cpu)))
#define	CPUSET_ISNULL(set)		((set) == 0)
#define	CPUSET_ISEQUAL(set1, set2)	((set1) == (set2))
#define	CPUSET_OR(set1, set2)		((void)((set1) |= (set2)))
#define	CPUSET_XOR(set1, set2)		((void)((set1) ^= (set2)))
#define	CPUSET_AND(set1, set2)		((void)((set1) &= (set2)))
#define	CPUSET_ZERO(set)		((void)((set) = 0))

#define	CPUSET_FIND(set, cpu)		{		\
	cpu = (uint_t)(lowbit(set) - 1);		\
}

#define	CPUSET_BOUNDS(set, smallest, largest)	{	\
	smallest = (uint_t)(lowbit(set) - 1);		\
	largest = (uint_t)(highbit(set) - 1);		\
}

#define	CPUSET_ATOMIC_DEL(set, cpu)	atomic_and_ulong(&(set), ~CPUSET(cpu))
#define	CPUSET_ATOMIC_ADD(set, cpu)	atomic_or_ulong(&(set), CPUSET(cpu))

#define	CPUSET_ATOMIC_XADD(set, cpu, result) \
	{ result = atomic_set_long_excl(&(set), (cpu)); }

#define	CPUSET_ATOMIC_XDEL(set, cpu, result) \
	{ result = atomic_clear_long_excl(&(set), (cpu)); }

#else	/* CPUSET_WORDS <= 0 */

#error NCPU is undefined or invalid

#endif	/* CPUSET_WORDS	*/

extern cpuset_t cpu_seqid_inuse;

#endif	/* (_KERNEL || _KMEMUSER) && _MACHDEP */

#define	CPU_CPR_OFFLINE		0x0
#define	CPU_CPR_ONLINE		0x1
#define	CPU_CPR_IS_OFFLINE(cpu)	(((cpu)->cpu_cpr_flags & CPU_CPR_ONLINE) == 0)
#define	CPU_CPR_IS_ONLINE(cpu)	((cpu)->cpu_cpr_flags & CPU_CPR_ONLINE)
#define	CPU_SET_CPR_FLAGS(cpu, flag)	((cpu)->cpu_cpr_flags |= flag)

#if defined(_KERNEL) || defined(_KMEMUSER)

extern struct cpu	*cpu[];		/* indexed by CPU number */
extern struct cpu	**cpu_seq;	/* indexed by sequential CPU id */
extern cpu_t		*cpu_list;	/* list of CPUs */
extern cpu_t		*cpu_active;	/* list of active CPUs */
extern int		ncpus;		/* number of CPUs present */
extern int		ncpus_online;	/* number of CPUs not quiesced */
extern int		max_ncpus;	/* max present before ncpus is known */
extern int		boot_max_ncpus;	/* like max_ncpus but for real */
extern int		boot_ncpus;	/* # cpus present @ boot */
extern processorid_t	max_cpuid;	/* maximum CPU number */
extern struct cpu	*cpu_inmotion;	/* offline or partition move target */
extern cpu_t		*clock_cpu_list;
extern processorid_t	max_cpu_seqid_ever;	/* maximum seqid ever given */

#if defined(__i386) || defined(__amd64)
extern struct cpu *curcpup(void);
#define	CPU		(curcpup())	/* Pointer to current CPU */
#else
#define	CPU		(curthread->t_cpu)	/* Pointer to current CPU */
#endif

/*
 * CPU_CURRENT indicates to thread_affinity_set to use CPU->cpu_id
 * as the target and to grab cpu_lock instead of requiring the caller
 * to grab it.
 */
#define	CPU_CURRENT	-3
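
/*
 * Usage sketch (illustrative): pinning the calling thread to the CPU it
 * is currently running on, then dropping the affinity when done:
 *
 *	thread_affinity_set(curthread, CPU_CURRENT);
 *	(... work that must stay on this CPU ...)
 *	thread_affinity_clear(curthread);
 */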

/*
 * Per-CPU statistics
 *
 * cpu_stats_t contains numerous system and VM-related statistics, in the form
 * of gauges or monotonically-increasing event occurrence counts.
 */

#define	CPU_STATS_ENTER_K()	kpreempt_disable()
#define	CPU_STATS_EXIT_K()	kpreempt_enable()

#define	CPU_STATS_ADD_K(class, stat, amount) \
	{	kpreempt_disable(); /* keep from switching CPUs */\
		CPU_STATS_ADDQ(CPU, class, stat, amount); \
		kpreempt_enable(); \
	}

#define	CPU_STATS_ADDQ(cp, class, stat, amount)	{			\
	extern void __dtrace_probe___cpu_##class##info_##stat(uint_t,	\
	    uint64_t *, cpu_t *);					\
	uint64_t *stataddr = &((cp)->cpu_stats.class.stat);		\
	__dtrace_probe___cpu_##class##info_##stat((amount),		\
	    stataddr, cp);						\
	*(stataddr) += (amount);					\
}

#define	CPU_STATS(cp, stat)						\
	((cp)->cpu_stats.stat)
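
/*
 * Usage sketch (illustrative; assumes a "vm" class statistic such as
 * pgpgin exists in cpu_stats_t): bumping a per-CPU statistic from
 * preemptable context, then reading it back for a given cp:
 *
 *	CPU_STATS_ADD_K(vm, pgpgin, 1);
 *	pages = CPU_STATS(cp, vm.pgpgin);
 */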

/*
 * Increment CPU generation value.
 * This macro should be called whenever a CPU goes on-line or off-line.
 * Updates to cpu_generation should be protected by cpu_lock.
 */
#define	CPU_NEW_GENERATION(cp)	((cp)->cpu_generation++)

#endif /* _KERNEL || _KMEMUSER */

/*
 * CPU support routines.
 */
#if	defined(_KERNEL) && defined(__STDC__)	/* not for genassym.c */

struct zone;

void	cpu_list_init(cpu_t *);
void	cpu_add_unit(cpu_t *);
void	cpu_del_unit(int cpuid);
void	cpu_add_active(cpu_t *);
void	cpu_kstat_init(cpu_t *);
void	cpu_visibility_add(cpu_t *, struct zone *);
void	cpu_visibility_remove(cpu_t *, struct zone *);
void	cpu_visibility_configure(cpu_t *, struct zone *);
void	cpu_visibility_unconfigure(cpu_t *, struct zone *);
void	cpu_visibility_online(cpu_t *, struct zone *);
void	cpu_visibility_offline(cpu_t *, struct zone *);
void	cpu_create_intrstat(cpu_t *);
void	cpu_delete_intrstat(cpu_t *);
int	cpu_kstat_intrstat_update(kstat_t *, int);
void	cpu_intr_swtch_enter(kthread_t *);
void	cpu_intr_swtch_exit(kthread_t *);

void	mbox_lock_init(void);	/* initialize cross-call locks */
void	mbox_init(int cpun);	/* initialize cross-calls */
void	poke_cpu(int cpun);	/* interrupt another CPU (to preempt) */

/*
 * Values for safe_list: the pause states that CPUs can be in.
 */
#define	PAUSE_IDLE	0		/* normal state */
#define	PAUSE_READY	1		/* paused thread ready to spl */
#define	PAUSE_WAIT	2		/* paused thread is spl-ed high */
#define	PAUSE_DIE	3		/* tell pause thread to leave */
#define	PAUSE_DEAD	4		/* pause thread has left */

void	mach_cpu_pause(volatile char *);

void	pause_cpus(cpu_t *off_cp, void *(*func)(void *));
void	start_cpus(void);
int	cpus_paused(void);
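
/*
 * Typical pattern (illustrative): quiescing every other CPU around an
 * update that no other CPU may observe mid-change; cpu_lock is held
 * across the pair:
 *
 *	mutex_enter(&cpu_lock);
 *	pause_cpus(NULL, NULL);
 *	(... modify the global state ...)
 *	start_cpus();
 *	mutex_exit(&cpu_lock);
 */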

void	cpu_pause_init(void);
cpu_t	*cpu_get(processorid_t cpun);	/* get the CPU struct for cpun */

int	cpu_online(cpu_t *cp);			/* take cpu online */
int	cpu_offline(cpu_t *cp, int flags);	/* take cpu offline */
int	cpu_spare(cpu_t *cp, int flags);	/* take cpu to spare */
int	cpu_faulted(cpu_t *cp, int flags);	/* take cpu to faulted */
int	cpu_poweron(cpu_t *cp);		/* take powered-off cpu to offline */
int	cpu_poweroff(cpu_t *cp);	/* take offline cpu to powered-off */

cpu_t	*cpu_intr_next(cpu_t *cp);	/* get next online CPU taking intrs */
int	cpu_intr_count(cpu_t *cp);	/* count # of CPUs handling intrs */
int	cpu_intr_on(cpu_t *cp);		/* CPU taking I/O interrupts? */
void	cpu_intr_enable(cpu_t *cp);	/* enable I/O interrupts */
int	cpu_intr_disable(cpu_t *cp);	/* disable I/O interrupts */
void	cpu_intr_alloc(cpu_t *cp, int n); /* allocate interrupt threads */

/*
 * Routines for checking CPU states.
 */
int	cpu_is_online(cpu_t *);		/* check if CPU is online */
int	cpu_is_nointr(cpu_t *);		/* check if CPU can service intrs */
int	cpu_is_active(cpu_t *);		/* check if CPU can run threads */
int	cpu_is_offline(cpu_t *);	/* check if CPU is offline */
int	cpu_is_poweredoff(cpu_t *);	/* check if CPU is powered off */

int	cpu_flagged_online(cpu_flag_t);	/* flags show CPU is online */
int	cpu_flagged_nointr(cpu_flag_t);	/* flags show CPU not handling intrs */
int	cpu_flagged_active(cpu_flag_t); /* flags show CPU scheduling threads */
int	cpu_flagged_offline(cpu_flag_t); /* flags show CPU is offline */
int	cpu_flagged_poweredoff(cpu_flag_t); /* flags show CPU is powered off */

/*
 * The processor_info(2) state of a CPU is a simplified representation suitable
 * for use by an application program.  Kernel subsystems should utilize the
 * internal per-CPU state as given by the cpu_flags member of the cpu structure,
 * as this information may include platform- or architecture-specific state
 * critical to a subsystem's disposition of a particular CPU.
 */
void	cpu_set_state(cpu_t *);		/* record/timestamp current state */
int	cpu_get_state(cpu_t *);		/* get current cpu state */
const char *cpu_get_state_str(cpu_t *);	/* get current cpu state as string */


void	cpu_set_curr_clock(uint64_t);	/* indicate the current CPU's freq */
void	cpu_set_supp_freqs(cpu_t *, const char *); /* set the CPU supported */
						/* frequencies */

int	cpu_configure(int);
int	cpu_unconfigure(int);
void	cpu_destroy_bound_threads(cpu_t *cp);

extern int cpu_bind_thread(kthread_t *tp, processorid_t bind,
    processorid_t *obind, int *error);
extern int cpu_unbind(processorid_t cpu_id, boolean_t force);
extern void thread_affinity_set(kthread_t *t, int cpu_id);
extern void thread_affinity_clear(kthread_t *t);
extern void affinity_set(int cpu_id);
extern void affinity_clear(void);
extern void init_cpu_mstate(struct cpu *, int);
extern void term_cpu_mstate(struct cpu *);
extern void new_cpu_mstate(int, hrtime_t);
extern void get_cpu_mstate(struct cpu *, hrtime_t *);
extern void thread_nomigrate(void);
extern void thread_allowmigrate(void);
extern void weakbinding_stop(void);
extern void weakbinding_start(void);

/*
 * The following routines affect the CPU's participation in interrupt
 * processing, if that is applicable on the architecture.  This only affects
 * interrupts which aren't directed at the processor (not cross calls).
 *
 * cpu_disable_intr returns non-zero if interrupts were previously enabled.
 */
int	cpu_disable_intr(struct cpu *cp); /* stop issuing interrupts to cpu */
void	cpu_enable_intr(struct cpu *cp); /* start issuing interrupts to cpu */

/*
 * The mutex cpu_lock protects cpu_flags for all CPUs, as well as the ncpus
 * and ncpus_online counts.
 */
extern kmutex_t	cpu_lock;	/* lock protecting CPU data */

/*
 * CPU state change events
 *
 * Various subsystems need to know when CPUs change their state.  They get this
 * information by registering CPU state change callbacks using
 * register_cpu_setup_func().  Whenever any CPU changes its state, the callback
 * function is called.  The callback function is passed three arguments:
 *
 *   Event, described by cpu_setup_t
 *   CPU ID
 *   Transparent pointer passed when registering the callback
 *
 * The callback function is called with cpu_lock held.  Its return value is
 * usually ignored, except for CPU_CONFIG and CPU_UNCONFIG events.  For these
 * two events, a non-zero return value indicates a failure and prevents
 * successful completion of the operation.
 *
 * New events may be added in the future.  Callback functions should ignore any
 * events that they do not understand.
 *
 * The following events provide notification callbacks:
 *
 *  CPU_INIT	A new CPU is started and added to the list of active CPUs
 *		  This event is only used during boot
 *
 *  CPU_CONFIG	A newly inserted CPU is prepared to start running code
 *		  This event is called by DR code
 *
 *  CPU_UNCONFIG CPU has been powered off and needs cleanup
 *		  This event is called by DR code
 *
 *  CPU_ON	CPU is enabled but does not run anything yet
 *
 *  CPU_INTR_ON	CPU is enabled and has interrupts enabled
 *
 *  CPU_OFF	CPU is going offline but can still run threads
 *
 *  CPU_CPUPART_OUT	CPU is going to move out of its partition
 *
 *  CPU_CPUPART_IN	CPU is going to move to a new partition
 *
 *  CPU_SETUP	CPU is set up during boot and can run threads
 */
typedef enum {
	CPU_INIT,
	CPU_CONFIG,
	CPU_UNCONFIG,
	CPU_ON,
	CPU_OFF,
	CPU_CPUPART_IN,
	CPU_CPUPART_OUT,
	CPU_SETUP,
	CPU_INTR_ON
} cpu_setup_t;

typedef int cpu_setup_func_t(cpu_setup_t, int, void *);

/*
 * Routines used to register interest in CPUs being added to or removed
 * from the system.
 */
extern void register_cpu_setup_func(cpu_setup_func_t *, void *);
extern void unregister_cpu_setup_func(cpu_setup_func_t *, void *);
extern void cpu_state_change_notify(int, cpu_setup_t);
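
/*
 * Registration sketch (illustrative; my_cpu_event() and my_prepare() are
 * hypothetical).  A non-zero return fails CPU_CONFIG/CPU_UNCONFIG:
 *
 *	static int
 *	my_cpu_event(cpu_setup_t what, int id, void *arg)
 *	{
 *		switch (what) {
 *		case CPU_CONFIG:
 *			return (my_prepare(id) ? 0 : -1);
 *		default:
 *			return (0);	(ignore unknown events)
 *		}
 *	}
 *
 *	mutex_enter(&cpu_lock);
 *	register_cpu_setup_func(my_cpu_event, NULL);
 *	mutex_exit(&cpu_lock);
 */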

/*
 * Call specified function on the given CPU
 */
typedef void (*cpu_call_func_t)(uintptr_t, uintptr_t);
extern void cpu_call(cpu_t *, cpu_call_func_t, uintptr_t, uintptr_t);
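
/*
 * Usage sketch (illustrative; my_probe() and read_something() are
 * hypothetical):
 *
 *	static void
 *	my_probe(uintptr_t arg1, uintptr_t arg2)
 *	{
 *		*(uint64_t *)arg1 = read_something(arg2);
 *	}
 *
 *	cpu_call(cp, my_probe, (uintptr_t)&val, 0);
 */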

/*
 * Create various strings that describe the given CPU for the
 * processor_info system call and configuration-related kstats.
 */
#define	CPU_IDSTRLEN	100

extern void init_cpu_info(struct cpu *);
extern void populate_idstr(struct cpu *);
extern void cpu_vm_data_init(struct cpu *);
extern void cpu_vm_data_destroy(struct cpu *);

#endif	/* _KERNEL */

#ifdef	__cplusplus
}
#endif

#endif /* _SYS_CPUVAR_H */