/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef _SYS_CPUVAR_H
#define	_SYS_CPUVAR_H

#include <sys/thread.h>
#include <sys/sysinfo.h>	/* has cpu_stat_t definition */
#include <sys/disp.h>
#include <sys/processor.h>

#if (defined(_KERNEL) || defined(_KMEMUSER)) && defined(_MACHDEP)
#include <sys/machcpuvar.h>
#endif

#include <sys/types.h>
#include <sys/file.h>
#include <sys/bitmap.h>
#include <sys/rwlock.h>
#include <sys/msacct.h>
#if defined(__GNUC__) && defined(_ASM_INLINES) && defined(_KERNEL) && \
	(defined(__i386) || defined(__amd64))
#include <asm/cpuvar.h>
#endif

#ifdef	__cplusplus
extern "C" {
#endif

struct squeue_set_s;

#define	CPU_CACHE_COHERENCE_SIZE	64
#define	S_LOADAVG_SZ	11
#define	S_MOVAVG_SZ	10

struct loadavg_s {
	int lg_cur;		/* current loadavg entry */
	unsigned int lg_len;	/* number of entries recorded */
	hrtime_t lg_total;	/* used to temporarily hold load totals */
	hrtime_t lg_loads[S_LOADAVG_SZ];	/* table of recorded entries */
};

/*
 * For fast event tracing.
 */
struct ftrace_record;
typedef struct ftrace_data {
	int			ftd_state;	/* ftrace flags */
	kmutex_t		ftd_unused;	/* ftrace buffer lock, unused */
	struct ftrace_record	*ftd_cur;	/* current record */
	struct ftrace_record	*ftd_first;	/* first record */
	struct ftrace_record	*ftd_last;	/* last record */
} ftrace_data_t;

struct cyc_cpu;
struct nvlist;

/*
 * Per-CPU data.
 *
 * Be careful adding new members: if they are not the same in all modules (e.g.
 * change size depending on a #define), CTF uniquification can fail to work
 * properly.  Furthermore, this is transitive in that it applies recursively to
 * all types pointed to by cpu_t.
 */
typedef struct cpu {
	processorid_t	cpu_id;			/* CPU number */
	processorid_t	cpu_seqid;	/* sequential CPU id (0..ncpus-1) */
	volatile cpu_flag_t cpu_flags;		/* flags indicating CPU state */
	struct cpu	*cpu_self;		/* pointer to itself */
	kthread_t	*cpu_thread;		/* current thread */
	kthread_t	*cpu_idle_thread;	/* idle thread for this CPU */
	kthread_t	*cpu_pause_thread;	/* pause thread for this CPU */
	klwp_id_t	cpu_lwp;		/* current lwp (if any) */
	klwp_id_t	cpu_fpowner;		/* currently loaded fpu owner */
	struct cpupart	*cpu_part;		/* partition with this CPU */
	struct lgrp_ld	*cpu_lpl;		/* pointer to this cpu's load */
	int		cpu_cache_offset;	/* see kmem.c for details */

	/*
	 * Links to other CPUs.  It is safe to walk these lists if
	 * one of the following is true:
	 *	- cpu_lock held
	 *	- preemption disabled via kpreempt_disable
	 *	- PIL >= DISP_LEVEL
	 *	- acting thread is an interrupt thread
	 *	- all other CPUs are paused
	 */
	struct cpu	*cpu_next;		/* next existing CPU */
	struct cpu	*cpu_prev;		/* prev existing CPU */
	struct cpu	*cpu_next_onln;		/* next online (enabled) CPU */
	struct cpu	*cpu_prev_onln;		/* prev online (enabled) CPU */
	struct cpu	*cpu_next_part;		/* next CPU in partition */
	struct cpu	*cpu_prev_part;		/* prev CPU in partition */
	struct cpu	*cpu_next_lgrp;		/* next CPU in latency group */
	struct cpu	*cpu_prev_lgrp;		/* prev CPU in latency group */
	struct cpu	*cpu_next_lpl;		/* next CPU in lgrp partition */
	struct cpu	*cpu_prev_lpl;		/* prev CPU in lgrp partition */

	struct cpu_pg	*cpu_pg;		/* cpu's processor groups */

	void		*cpu_reserved[4];	/* reserved for future use */

	/*
	 * Scheduling variables.
	 */
	disp_t		*cpu_disp;		/* dispatch queue data */
	/*
	 * Note that cpu_disp is set before the CPU is added to the system
	 * and is never modified.  Hence, no additional locking is needed
	 * beyond what's necessary to access the cpu_t structure.
	 */
	char		cpu_runrun;	/* scheduling flag - set to preempt */
	char		cpu_kprunrun;		/* force kernel preemption */
	pri_t		cpu_chosen_level;	/* priority at which cpu */
						/* was chosen for scheduling */
	kthread_t	*cpu_dispthread; /* thread selected for dispatch */
	disp_lock_t	cpu_thread_lock; /* dispatcher lock on current thread */
	uint8_t		cpu_disp_flags;	/* flags used by dispatcher */
	/*
	 * The following field is updated whenever cpu_dispthread changes,
	 * and also wherever the priority of the current thread
	 * (cpu_dispthread) changes.  It is used by disp_lowpri_cpu().
	 */
	pri_t		cpu_dispatch_pri; /* priority of cpu_dispthread */
	clock_t		cpu_last_swtch;	/* last time switched to new thread */

	/*
	 * Interrupt data.
	 */
	caddr_t		cpu_intr_stack;	/* interrupt stack */
	kthread_t	*cpu_intr_thread; /* interrupt thread list */
	uint_t		cpu_intr_actv;	/* interrupt levels active (bitmask) */
	int		cpu_base_spl;	/* priority of highest active intr */

	/*
	 * Statistics.
	 */
	cpu_stats_t	cpu_stats;		/* per-CPU statistics */
	struct kstat	*cpu_info_kstat;	/* kstat for cpu info */

	uintptr_t	cpu_profile_pc;	/* kernel PC in profile interrupt */
	uintptr_t	cpu_profile_upc; /* user PC in profile interrupt */
	uintptr_t	cpu_profile_pil; /* PIL when profile interrupted */

	ftrace_data_t	cpu_ftrace;		/* per cpu ftrace data */

	clock_t		cpu_deadman_lbolt;	/* used by deadman() */
	uint_t		cpu_deadman_countdown;	/* used by deadman() */

	kmutex_t	cpu_cpc_ctxlock; /* protects context for idle thread */
	kcpc_ctx_t	*cpu_cpc_ctx;	/* performance counter context */

	/*
	 * Configuration information for the processor_info system call.
	 */
	processor_info_t cpu_type_info;	/* config info */
	time_t		cpu_state_begin; /* when CPU entered current state */
	char		cpu_cpr_flags;	/* CPR related info */
	struct cyc_cpu	*cpu_cyclic;	/* per cpu cyclic subsystem data */
	struct squeue_set_s *cpu_squeue_set;	/* per cpu squeue set */
	struct nvlist	*cpu_props;	/* pool-related properties */

	krwlock_t	cpu_ft_lock;		/* DTrace: fasttrap lock */
	uintptr_t	cpu_dtrace_caller;	/* DTrace: caller, if any */
	hrtime_t	cpu_dtrace_chillmark;	/* DTrace: chill mark time */
	hrtime_t	cpu_dtrace_chilled;	/* DTrace: total chill time */
	volatile uint16_t cpu_mstate;		/* cpu microstate */
	volatile uint16_t cpu_mstate_gen;	/* generation counter */
	volatile hrtime_t cpu_mstate_start;	/* cpu microstate start time */
	volatile hrtime_t cpu_acct[NCMSTATES];	/* cpu microstate data */
	hrtime_t	cpu_intracct[NCMSTATES]; /* interrupt mstate data */
	hrtime_t	cpu_waitrq;		/* cpu run-queue wait time */
	struct loadavg_s cpu_loadavg;		/* loadavg info for this cpu */

	char		*cpu_idstr;	/* for printing and debugging */
	char		*cpu_brandstr;	/* for printing */

	/*
	 * Sum of all device interrupt weights that are currently directed at
	 * this cpu.  Cleared at start of interrupt redistribution.
	 */
	int32_t		cpu_intr_weight;
	void		*cpu_vm_data;

	struct cpu_physid *cpu_physid;	/* physical associations */

	uint64_t	cpu_curr_clock;		/* current clock freq in Hz */
	char		*cpu_supp_freqs;	/* supported freqs in Hz */

	/*
	 * Interrupt load factor used by dispatcher & softcall
	 */
	hrtime_t	cpu_intrlast;   /* total interrupt time (nsec) */
	int		cpu_intrload;   /* interrupt load factor (0-99%) */

	/*
	 * New members must be added /before/ this member, as the CTF tools
	 * rely on this being the last field before cpu_m, so they can
	 * correctly calculate the offset when synthetically adding the cpu_m
	 * member in objects that do not have it.  This fixup is required for
	 * uniquification to work correctly.
	 */
	uintptr_t	cpu_m_pad;

#if (defined(_KERNEL) || defined(_KMEMUSER)) && defined(_MACHDEP)
	struct machcpu	cpu_m;		/* per architecture info */
#endif
} cpu_t;
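
/*
 * Illustrative sketch (not part of the original header): walking the
 * circular list of online CPUs with preemption disabled, which is one of
 * the conditions listed above under which the cpu_next_* links are stable.
 * The helper name count_online_cpus() is hypothetical.
 *
 *	static int
 *	count_online_cpus(void)
 *	{
 *		cpu_t	*cp;
 *		int	n = 0;
 *
 *		kpreempt_disable();	-- links cannot change underneath us
 *		cp = cpu_active;
 *		do {
 *			n++;
 *		} while ((cp = cp->cpu_next_onln) != cpu_active);
 *		kpreempt_enable();
 *		return (n);
 *	}
 */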

/*
 * The cpu_core structure consists of per-CPU state available in any context.
 * On some architectures, this may mean that the page(s) containing the
 * NCPU-sized array of cpu_core structures must be locked in the TLB -- it
 * is up to the platform to ensure that this is done properly.  Note that
 * the structure is sized to avoid false sharing.
 */
#define	CPUC_SIZE		(sizeof (uint16_t) + sizeof (uintptr_t) + \
				sizeof (kmutex_t))
#define	CPUC_PADSIZE		(CPU_CACHE_COHERENCE_SIZE - CPUC_SIZE)

typedef struct cpu_core {
	uint16_t	cpuc_dtrace_flags;	/* DTrace flags */
	uint8_t		cpuc_pad[CPUC_PADSIZE];	/* padding */
	uintptr_t	cpuc_dtrace_illval;	/* DTrace illegal value */
	kmutex_t	cpuc_pid_lock;		/* DTrace pid provider lock */
} cpu_core_t;

#ifdef _KERNEL
extern cpu_core_t cpu_core[];
#endif /* _KERNEL */

/*
 * CPU_ON_INTR() macro. Returns non-zero if currently on interrupt stack.
 * Note that this isn't a test for a high PIL.  For example, cpu_intr_actv
 * does not get updated when we go through sys_trap from TL>0 at high PIL.
 * getpil() should be used instead to check for PIL levels.
 */
#define	CPU_ON_INTR(cpup) ((cpup)->cpu_intr_actv >> (LOCK_LEVEL + 1))
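
/*
 * Illustrative sketch (not from the original header): a typical check to
 * avoid blocking while running on the interrupt stack.
 *
 *	if (CPU_ON_INTR(CPU))
 *		return;		-- high-level interrupt context; don't block
 */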

#if defined(_KERNEL) || defined(_KMEMUSER)

#define	INTR_STACK_SIZE	MAX(DEFAULTSTKSZ, PAGESIZE)

/* MEMBERS PROTECTED BY "atomicity": cpu_flags */

/*
 * Flags in the CPU structure.
 *
 * These are protected by cpu_lock (except during creation).
 *
 * Offlined CPUs have three stages of being offline:
 *
 * CPU_ENABLE indicates that the CPU is participating in I/O interrupts
 * that can be directed at a number of different CPUs.  If CPU_ENABLE
 * is off, the CPU will not be given interrupts that can be sent elsewhere,
 * but will still get interrupts from devices associated with that CPU only,
 * and from other CPUs.
 *
 * CPU_OFFLINE indicates that the dispatcher should not allow any threads
 * other than interrupt threads to run on that CPU.  A CPU will not have
 * CPU_OFFLINE set if there are any bound threads (besides interrupts).
 *
 * CPU_QUIESCED is set if p_offline was able to completely idle the CPU
 * so that it does not have to run interrupt threads.  In that case it
 * stays in the idle loop until CPU_QUIESCED is turned off.
 *
 * CPU_FROZEN is used only by CPR to mark CPUs that have been successfully
 * suspended (in the suspend path), or have yet to be resumed (in the resume
 * case).
 *
 * On some platforms CPUs can be individually powered off.
 * The following flags are set for powered off CPUs: CPU_QUIESCED,
 * CPU_OFFLINE, and CPU_POWEROFF.  The following flags are cleared:
 * CPU_RUNNING, CPU_READY, CPU_EXISTS, and CPU_ENABLE.
 */
#define	CPU_RUNNING	0x001		/* CPU running */
#define	CPU_READY	0x002		/* CPU ready for cross-calls */
#define	CPU_QUIESCED	0x004		/* CPU will stay in idle */
#define	CPU_EXISTS	0x008		/* CPU is configured */
#define	CPU_ENABLE	0x010		/* CPU enabled for interrupts */
#define	CPU_OFFLINE	0x020		/* CPU offline via p_online */
#define	CPU_POWEROFF	0x040		/* CPU is powered off */
#define	CPU_FROZEN	0x080		/* CPU is frozen via CPR suspend */
#define	CPU_SPARE	0x100		/* CPU offline available for use */
#define	CPU_FAULTED	0x200		/* CPU offline diagnosed faulty */

#define	FMT_CPU_FLAGS							\
	"\20\12fault\11spare\10frozen"					\
	"\7poweroff\6offline\5enable\4exist\3quiesced\2ready\1run"

#define	CPU_ACTIVE(cpu)	(((cpu)->cpu_flags & CPU_OFFLINE) == 0)
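
/*
 * Illustrative sketch (not from the original header): reading cpu_flags
 * consistently requires cpu_lock, per the rules above.
 *
 *	mutex_enter(&cpu_lock);
 *	if (CPU_ACTIVE(cp) && (cp->cpu_flags & CPU_ENABLE))
 *		...	-- cp runs threads and takes I/O interrupts
 *	mutex_exit(&cpu_lock);
 */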

/*
 * Flags for cpu_offline(), cpu_faulted(), and cpu_spare().
 */
#define	CPU_FORCED	0x0001		/* Force CPU offline */

/*
 * DTrace flags.
 */
#define	CPU_DTRACE_NOFAULT	0x0001	/* Don't fault */
#define	CPU_DTRACE_DROP		0x0002	/* Drop this ECB */
#define	CPU_DTRACE_BADADDR	0x0004	/* DTrace fault: bad address */
#define	CPU_DTRACE_BADALIGN	0x0008	/* DTrace fault: bad alignment */
#define	CPU_DTRACE_DIVZERO	0x0010	/* DTrace fault: divide by zero */
#define	CPU_DTRACE_ILLOP	0x0020	/* DTrace fault: illegal operation */
#define	CPU_DTRACE_NOSCRATCH	0x0040	/* DTrace fault: out of scratch */
#define	CPU_DTRACE_KPRIV	0x0080	/* DTrace fault: bad kernel access */
#define	CPU_DTRACE_UPRIV	0x0100	/* DTrace fault: bad user access */
#define	CPU_DTRACE_TUPOFLOW	0x0200	/* DTrace fault: tuple stack overflow */
#if defined(__sparc)
#define	CPU_DTRACE_FAKERESTORE	0x0400	/* pid provider hint to getreg */
#endif
#define	CPU_DTRACE_ENTRY	0x0800	/* pid provider hint to ustack() */
#define	CPU_DTRACE_BADSTACK	0x1000	/* DTrace fault: bad stack */

#define	CPU_DTRACE_FAULT	(CPU_DTRACE_BADADDR | CPU_DTRACE_BADALIGN | \
				CPU_DTRACE_DIVZERO | CPU_DTRACE_ILLOP | \
				CPU_DTRACE_NOSCRATCH | CPU_DTRACE_KPRIV | \
				CPU_DTRACE_UPRIV | CPU_DTRACE_TUPOFLOW | \
				CPU_DTRACE_BADSTACK)
#define	CPU_DTRACE_ERROR	(CPU_DTRACE_FAULT | CPU_DTRACE_DROP)

/*
 * Dispatcher flags
 * These flags must be changed only by the current CPU.
 */
#define	CPU_DISP_DONTSTEAL	0x01	/* CPU undergoing context swtch */
#define	CPU_DISP_HALTED		0x02	/* CPU halted waiting for interrupt */


#endif /* _KERNEL || _KMEMUSER */

#if (defined(_KERNEL) || defined(_KMEMUSER)) && defined(_MACHDEP)

/*
 * Macros for manipulating sets of CPUs as a bitmap.  Note that this
 * bitmap may vary in size depending on the maximum CPU id a specific
 * platform supports.  This may be different from the number of CPUs
 * the platform supports, since CPU ids can be sparse.  We define two
 * sets of macros; one for platforms where the maximum CPU id is less
 * than the number of bits in a single word (32 in a 32-bit kernel,
 * 64 in a 64-bit kernel), and one for platforms that require bitmaps
 * of more than one word.
 */

#define	CPUSET_WORDS	BT_BITOUL(NCPU)
#define	CPUSET_NOTINSET	((uint_t)-1)

#if	CPUSET_WORDS > 1

typedef struct cpuset {
	ulong_t	cpub[CPUSET_WORDS];
} cpuset_t;

/*
 * Private functions for manipulating cpusets that do not fit in a
 * single word.  These should not be used directly; instead the
 * CPUSET_* macros should be used so the code will be portable
 * across different definitions of NCPU.
 */
extern	void	cpuset_all(cpuset_t *);
extern	void	cpuset_all_but(cpuset_t *, uint_t);
extern	int	cpuset_isnull(cpuset_t *);
extern	int	cpuset_cmp(cpuset_t *, cpuset_t *);
extern	void	cpuset_only(cpuset_t *, uint_t);
extern	uint_t	cpuset_find(cpuset_t *);
extern	void	cpuset_bounds(cpuset_t *, uint_t *, uint_t *);

#define	CPUSET_ALL(set)			cpuset_all(&(set))
#define	CPUSET_ALL_BUT(set, cpu)	cpuset_all_but(&(set), cpu)
#define	CPUSET_ONLY(set, cpu)		cpuset_only(&(set), cpu)
#define	CPU_IN_SET(set, cpu)		BT_TEST((set).cpub, cpu)
#define	CPUSET_ADD(set, cpu)		BT_SET((set).cpub, cpu)
#define	CPUSET_DEL(set, cpu)		BT_CLEAR((set).cpub, cpu)
#define	CPUSET_ISNULL(set)		cpuset_isnull(&(set))
#define	CPUSET_ISEQUAL(set1, set2)	cpuset_cmp(&(set1), &(set2))

/*
 * Find one CPU in the cpuset.
 * Sets "cpu" to the id of the found CPU, or to CPUSET_NOTINSET if no CPU
 * could be found (i.e. the set is empty).
 */
#define	CPUSET_FIND(set, cpu)		{		\
	cpu = cpuset_find(&(set));			\
}

/*
 * Determine the smallest and largest CPU id in the set.  Returns
 * CPUSET_NOTINSET in both smallest and largest when the set is empty.
 */
#define	CPUSET_BOUNDS(set, smallest, largest)	{		\
	cpuset_bounds(&(set), &(smallest), &(largest));		\
}

/*
 * Atomic cpuset operations.
 * These are safe to use for concurrent cpuset manipulations.
 * "xdel" and "xadd" are exclusive operations that set "result" to "0"
 * if the add or delete was successful, or to "-1" if not (e.g. attempting
 * to add a cpu that is already in the cpuset, or to delete one that is
 * not in it).
 */

#define	CPUSET_ATOMIC_DEL(set, cpu)	BT_ATOMIC_CLEAR((set).cpub, (cpu))
#define	CPUSET_ATOMIC_ADD(set, cpu)	BT_ATOMIC_SET((set).cpub, (cpu))

#define	CPUSET_ATOMIC_XADD(set, cpu, result) \
	BT_ATOMIC_SET_EXCL((set).cpub, cpu, result)

#define	CPUSET_ATOMIC_XDEL(set, cpu, result) \
	BT_ATOMIC_CLEAR_EXCL((set).cpub, cpu, result)


#define	CPUSET_OR(set1, set2)		{		\
	int _i;						\
	for (_i = 0; _i < CPUSET_WORDS; _i++)		\
		(set1).cpub[_i] |= (set2).cpub[_i];	\
}

#define	CPUSET_XOR(set1, set2)		{		\
	int _i;						\
	for (_i = 0; _i < CPUSET_WORDS; _i++)		\
		(set1).cpub[_i] ^= (set2).cpub[_i];	\
}

#define	CPUSET_AND(set1, set2)		{		\
	int _i;						\
	for (_i = 0; _i < CPUSET_WORDS; _i++)		\
		(set1).cpub[_i] &= (set2).cpub[_i];	\
}

#define	CPUSET_ZERO(set)		{		\
	int _i;						\
	for (_i = 0; _i < CPUSET_WORDS; _i++)		\
		(set).cpub[_i] = 0;			\
}

#elif	CPUSET_WORDS == 1

typedef	ulong_t	cpuset_t;	/* a set of CPUs */

#define	CPUSET(cpu)			(1UL << (cpu))

#define	CPUSET_ALL(set)			((void)((set) = ~0UL))
#define	CPUSET_ALL_BUT(set, cpu)	((void)((set) = ~CPUSET(cpu)))
#define	CPUSET_ONLY(set, cpu)		((void)((set) = CPUSET(cpu)))
#define	CPU_IN_SET(set, cpu)		((set) & CPUSET(cpu))
#define	CPUSET_ADD(set, cpu)		((void)((set) |= CPUSET(cpu)))
#define	CPUSET_DEL(set, cpu)		((void)((set) &= ~CPUSET(cpu)))
#define	CPUSET_ISNULL(set)		((set) == 0)
#define	CPUSET_ISEQUAL(set1, set2)	((set1) == (set2))
#define	CPUSET_OR(set1, set2)		((void)((set1) |= (set2)))
#define	CPUSET_XOR(set1, set2)		((void)((set1) ^= (set2)))
#define	CPUSET_AND(set1, set2)		((void)((set1) &= (set2)))
#define	CPUSET_ZERO(set)		((void)((set) = 0))

#define	CPUSET_FIND(set, cpu)		{		\
	cpu = (uint_t)(lowbit(set) - 1);		\
}

#define	CPUSET_BOUNDS(set, smallest, largest)	{	\
	smallest = (uint_t)(lowbit(set) - 1);		\
	largest = (uint_t)(highbit(set) - 1);		\
}

#define	CPUSET_ATOMIC_DEL(set, cpu)	atomic_and_long(&(set), ~CPUSET(cpu))
#define	CPUSET_ATOMIC_ADD(set, cpu)	atomic_or_long(&(set), CPUSET(cpu))

#define	CPUSET_ATOMIC_XADD(set, cpu, result) \
	{ result = atomic_set_long_excl(&(set), (cpu)); }

#define	CPUSET_ATOMIC_XDEL(set, cpu, result) \
	{ result = atomic_clear_long_excl(&(set), (cpu)); }

#else	/* CPUSET_WORDS <= 0 */

#error NCPU is undefined or invalid

#endif	/* CPUSET_WORDS	*/
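
/*
 * Illustrative sketch (not from the original header): the CPUSET_* macros
 * hide whether cpuset_t is one word or many, so the same code works for
 * any definition of NCPU.
 *
 *	cpuset_t set;
 *
 *	CPUSET_ZERO(set);
 *	CPUSET_ADD(set, CPU->cpu_id);
 *	if (CPU_IN_SET(set, CPU->cpu_id))
 *		...	-- the current CPU is a member
 */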

extern cpuset_t cpu_seqid_inuse;

#endif	/* (_KERNEL || _KMEMUSER) && _MACHDEP */

#define	CPU_CPR_OFFLINE		0x0
#define	CPU_CPR_ONLINE		0x1
#define	CPU_CPR_IS_OFFLINE(cpu)	(((cpu)->cpu_cpr_flags & CPU_CPR_ONLINE) == 0)
#define	CPU_CPR_IS_ONLINE(cpu)	((cpu)->cpu_cpr_flags & CPU_CPR_ONLINE)
#define	CPU_SET_CPR_FLAGS(cpu, flag)	((cpu)->cpu_cpr_flags |= flag)

#if defined(_KERNEL) || defined(_KMEMUSER)

extern struct cpu	*cpu[];		/* indexed by CPU number */
extern cpu_t		*cpu_list;	/* list of CPUs */
extern cpu_t		*cpu_active;	/* list of active CPUs */
extern int		ncpus;		/* number of CPUs present */
extern int		ncpus_online;	/* number of CPUs not quiesced */
extern int		max_ncpus;	/* max present before ncpus is known */
extern int		boot_max_ncpus;	/* like max_ncpus but for real */
extern int		boot_ncpus;	/* # cpus present @ boot */
extern processorid_t	max_cpuid;	/* maximum CPU number */
extern struct cpu	*cpu_inmotion;	/* offline or partition move target */
extern cpu_t		*clock_cpu_list;

#if defined(__i386) || defined(__amd64)
extern struct cpu *curcpup(void);
#define	CPU		(curcpup())	/* Pointer to current CPU */
#else
#define	CPU		(curthread->t_cpu)	/* Pointer to current CPU */
#endif

/*
 * CPU_CURRENT indicates to thread_affinity_set to use CPU->cpu_id
 * as the target and to grab cpu_lock instead of requiring the caller
 * to grab it.
 */
#define	CPU_CURRENT	-3

/*
 * Per-CPU statistics
 *
 * cpu_stats_t contains numerous system and VM-related statistics, in the form
 * of gauges or monotonically-increasing event occurrence counts.
 */

#define	CPU_STATS_ENTER_K()	kpreempt_disable()
#define	CPU_STATS_EXIT_K()	kpreempt_enable()

#define	CPU_STATS_ADD_K(class, stat, amount) \
	{	kpreempt_disable(); /* keep from switching CPUs */\
		CPU_STATS_ADDQ(CPU, class, stat, amount); \
		kpreempt_enable(); \
	}

#define	CPU_STATS_ADDQ(cp, class, stat, amount)	{			\
	extern void __dtrace_probe___cpu_##class##info_##stat(uint_t,	\
	    uint64_t *, cpu_t *);					\
	uint64_t *stataddr = &((cp)->cpu_stats.class.stat);		\
	__dtrace_probe___cpu_##class##info_##stat((amount),		\
	    stataddr, cp);						\
	*(stataddr) += (amount);					\
}

#define	CPU_STATS(cp, stat)					\
	((cp)->cpu_stats.stat)
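
/*
 * Illustrative sketch (not from the original header): bumping a per-CPU
 * system statistic from code that may migrate between CPUs.  The sys.xcalls
 * field is used as an assumed example of a cpu_stats_t member.
 *
 *	CPU_STATS_ADD_K(sys, xcalls, 1);
 *
 * Preemption is disabled around the update, so the increment lands on the
 * cpu_stats of whichever CPU actually executed it.
 */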

#endif /* _KERNEL || _KMEMUSER */

/*
 * CPU support routines.
 */
#if	defined(_KERNEL) && defined(__STDC__)	/* not for genassym.c */

struct zone;

void	cpu_list_init(cpu_t *);
void	cpu_add_unit(cpu_t *);
void	cpu_del_unit(int cpuid);
void	cpu_add_active(cpu_t *);
void	cpu_kstat_init(cpu_t *);
void	cpu_visibility_add(cpu_t *, struct zone *);
void	cpu_visibility_remove(cpu_t *, struct zone *);
void	cpu_visibility_configure(cpu_t *, struct zone *);
void	cpu_visibility_unconfigure(cpu_t *, struct zone *);
void	cpu_visibility_online(cpu_t *, struct zone *);
void	cpu_visibility_offline(cpu_t *, struct zone *);
void	cpu_create_intrstat(cpu_t *);
void	cpu_delete_intrstat(cpu_t *);
int	cpu_kstat_intrstat_update(kstat_t *, int);
void	cpu_intr_swtch_enter(kthread_t *);
void	cpu_intr_swtch_exit(kthread_t *);

void	mbox_lock_init(void);	 /* initialize cross-call locks */
void	mbox_init(int cpun);	 /* initialize cross-calls */
void	poke_cpu(int cpun);	 /* interrupt another CPU (to preempt) */

/*
 * Values for safe_list: the pause states that CPUs can be in.
 */
#define	PAUSE_IDLE	0		/* normal state */
#define	PAUSE_READY	1		/* paused thread ready to spl */
#define	PAUSE_WAIT	2		/* paused thread is spl-ed high */
#define	PAUSE_DIE	3		/* tell pause thread to leave */
#define	PAUSE_DEAD	4		/* pause thread has left */

void	mach_cpu_pause(volatile char *);

void	pause_cpus(cpu_t *off_cp);
void	start_cpus(void);
int	cpus_paused(void);
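
/*
 * Illustrative sketch (not from the original header): quiescing all other
 * CPUs around a critical update; holding cpu_lock across the pause is an
 * assumption based on the locking rules described in this header.
 *
 *	mutex_enter(&cpu_lock);
 *	pause_cpus(NULL);
 *	...	-- every other CPU spins in its pause thread
 *	start_cpus();
 *	mutex_exit(&cpu_lock);
 */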

void	cpu_pause_init(void);
cpu_t	*cpu_get(processorid_t cpun);	/* get the CPU struct for cpun */

int	cpu_online(cpu_t *cp);			/* take cpu online */
int	cpu_offline(cpu_t *cp, int flags);	/* take cpu offline */
int	cpu_spare(cpu_t *cp, int flags);	/* take cpu to spare */
int	cpu_faulted(cpu_t *cp, int flags);	/* take cpu to faulted */
int	cpu_poweron(cpu_t *cp);		/* take powered-off cpu to offline */
int	cpu_poweroff(cpu_t *cp);	/* take offline cpu to powered-off */
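
/*
 * Illustrative sketch (not from the original header): forcing a CPU
 * offline and bringing it back, under cpu_lock.  The id variable is
 * hypothetical.
 *
 *	cpu_t	*cp;
 *
 *	mutex_enter(&cpu_lock);
 *	if ((cp = cpu_get(id)) != NULL &&
 *	    cpu_offline(cp, CPU_FORCED) == 0)
 *		(void) cpu_online(cp);
 *	mutex_exit(&cpu_lock);
 */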

cpu_t	*cpu_intr_next(cpu_t *cp);	/* get next online CPU taking intrs */
int	cpu_intr_count(cpu_t *cp);	/* count # of CPUs handling intrs */
int	cpu_intr_on(cpu_t *cp);		/* CPU taking I/O interrupts? */
void	cpu_intr_enable(cpu_t *cp);	/* enable I/O interrupts */
int	cpu_intr_disable(cpu_t *cp);	/* disable I/O interrupts */
void	cpu_intr_alloc(cpu_t *cp, int n); /* allocate interrupt threads */

/*
 * Routines for checking CPU states.
 */
int	cpu_is_online(cpu_t *);		/* check if CPU is online */
int	cpu_is_nointr(cpu_t *);		/* check if CPU can service intrs */
int	cpu_is_active(cpu_t *);		/* check if CPU can run threads */
int	cpu_is_offline(cpu_t *);	/* check if CPU is offline */
int	cpu_is_poweredoff(cpu_t *);	/* check if CPU is powered off */

int	cpu_flagged_online(cpu_flag_t);	/* flags show CPU is online */
int	cpu_flagged_nointr(cpu_flag_t);	/* flags show CPU not handling intrs */
int	cpu_flagged_active(cpu_flag_t); /* flags show CPU scheduling threads */
int	cpu_flagged_offline(cpu_flag_t); /* flags show CPU is offline */
int	cpu_flagged_poweredoff(cpu_flag_t); /* flags show CPU is powered off */

/*
 * The processor_info(2) state of a CPU is a simplified representation suitable
 * for use by an application program.  Kernel subsystems should utilize the
 * internal per-CPU state as given by the cpu_flags member of the cpu structure,
 * as this information may include platform- or architecture-specific state
 * critical to a subsystem's disposition of a particular CPU.
 */
void	cpu_set_state(cpu_t *);		/* record/timestamp current state */
int	cpu_get_state(cpu_t *);		/* get current cpu state */
const char *cpu_get_state_str(cpu_t *);	/* get current cpu state as string */


void	cpu_set_supp_freqs(cpu_t *, const char *); /* set the CPU supported */
						/* frequencies */

int	cpu_configure(int);
int	cpu_unconfigure(int);
void	cpu_destroy_bound_threads(cpu_t *cp);

extern int cpu_bind_thread(kthread_t *tp, processorid_t bind,
    processorid_t *obind, int *error);
extern int cpu_unbind(processorid_t cpu_id, boolean_t force);
extern void thread_affinity_set(kthread_t *t, int cpu_id);
extern void thread_affinity_clear(kthread_t *t);
extern void affinity_set(int cpu_id);
extern void affinity_clear(void);
extern void init_cpu_mstate(struct cpu *, int);
extern void term_cpu_mstate(struct cpu *);
extern void new_cpu_mstate(int, hrtime_t);
extern void get_cpu_mstate(struct cpu *, hrtime_t *);
extern void thread_nomigrate(void);
extern void thread_allowmigrate(void);
extern void weakbinding_stop(void);
extern void weakbinding_start(void);
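
/*
 * Illustrative sketch (not from the original header): temporarily pinning
 * the current thread to the CPU it is running on.  Passing CPU_CURRENT
 * makes the affinity code take cpu_lock itself, per the CPU_CURRENT
 * comment above.
 *
 *	affinity_set(CPU_CURRENT);
 *	...	-- this thread now stays on this CPU
 *	affinity_clear();
 */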

/*
 * The following routines affect a CPU's participation in interrupt processing,
 * if that is applicable on the architecture.  This only affects interrupts
 * which aren't directed at the processor (not cross calls).
 *
 * cpu_disable_intr returns non-zero if interrupts were previously enabled.
 */
int	cpu_disable_intr(struct cpu *cp); /* stop issuing interrupts to cpu */
void	cpu_enable_intr(struct cpu *cp); /* start issuing interrupts to cpu */

/*
 * The mutex cpu_lock protects cpu_flags for all CPUs, as well as the ncpus
 * and ncpus_online counts.
 */
extern kmutex_t	cpu_lock;	/* lock protecting CPU data */

typedef enum {
	CPU_INIT,
	CPU_CONFIG,
	CPU_UNCONFIG,
	CPU_ON,
	CPU_OFF,
	CPU_CPUPART_IN,
	CPU_CPUPART_OUT
} cpu_setup_t;

typedef int cpu_setup_func_t(cpu_setup_t, int, void *);

/*
 * Routines used to register interest in cpu's being added to or removed
 * from the system.
 */
extern void register_cpu_setup_func(cpu_setup_func_t *, void *);
extern void unregister_cpu_setup_func(cpu_setup_func_t *, void *);
extern void cpu_state_change_notify(int, cpu_setup_t);
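
/*
 * Illustrative sketch (not from the original header): a subsystem that
 * keeps per-CPU state can watch reconfiguration events.  The callback name
 * my_cpu_setup is hypothetical; registering under cpu_lock is an assumption
 * based on the locking rules described in this header.
 *
 *	static int
 *	my_cpu_setup(cpu_setup_t what, int id, void *arg)
 *	{
 *		switch (what) {
 *		case CPU_CONFIG:
 *			...	-- allocate per-CPU state for CPU id
 *			break;
 *		case CPU_UNCONFIG:
 *			...	-- tear that state down
 *			break;
 *		default:
 *			break;
 *		}
 *		return (0);
 *	}
 *
 *	mutex_enter(&cpu_lock);
 *	register_cpu_setup_func(my_cpu_setup, NULL);
 *	mutex_exit(&cpu_lock);
 */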

/*
 * Create various strings that describe the given CPU for the
 * processor_info system call and configuration-related kstats.
 */
#define	CPU_IDSTRLEN	100

extern void init_cpu_info(struct cpu *);
extern void populate_idstr(struct cpu *);
extern void cpu_vm_data_init(struct cpu *);
extern void cpu_vm_data_destroy(struct cpu *);

#endif	/* _KERNEL */

#ifdef	__cplusplus
}
#endif

#endif /* _SYS_CPUVAR_H */