kern_proc.c revision 1.139.2.5
      1  1.139.2.5      yamt /*	$NetBSD: kern_proc.c,v 1.139.2.5 2010/08/11 22:54:39 yamt Exp $	*/
      2       1.33   thorpej 
      3       1.33   thorpej /*-
      4      1.131        ad  * Copyright (c) 1999, 2006, 2007, 2008 The NetBSD Foundation, Inc.
      5       1.33   thorpej  * All rights reserved.
      6       1.33   thorpej  *
      7       1.33   thorpej  * This code is derived from software contributed to The NetBSD Foundation
      8       1.33   thorpej  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9      1.100        ad  * NASA Ames Research Center, and by Andrew Doran.
     10       1.33   thorpej  *
     11       1.33   thorpej  * Redistribution and use in source and binary forms, with or without
     12       1.33   thorpej  * modification, are permitted provided that the following conditions
     13       1.33   thorpej  * are met:
     14       1.33   thorpej  * 1. Redistributions of source code must retain the above copyright
     15       1.33   thorpej  *    notice, this list of conditions and the following disclaimer.
     16       1.33   thorpej  * 2. Redistributions in binary form must reproduce the above copyright
     17       1.33   thorpej  *    notice, this list of conditions and the following disclaimer in the
     18       1.33   thorpej  *    documentation and/or other materials provided with the distribution.
     19       1.33   thorpej  *
     20       1.33   thorpej  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21       1.33   thorpej  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22       1.33   thorpej  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23       1.33   thorpej  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24       1.33   thorpej  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25       1.33   thorpej  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26       1.33   thorpej  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27       1.33   thorpej  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28       1.33   thorpej  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29       1.33   thorpej  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30       1.33   thorpej  * POSSIBILITY OF SUCH DAMAGE.
     31       1.33   thorpej  */
     32        1.9       cgd 
     33        1.1       cgd /*
     34        1.7       cgd  * Copyright (c) 1982, 1986, 1989, 1991, 1993
     35        1.7       cgd  *	The Regents of the University of California.  All rights reserved.
     36        1.1       cgd  *
     37        1.1       cgd  * Redistribution and use in source and binary forms, with or without
     38        1.1       cgd  * modification, are permitted provided that the following conditions
     39        1.1       cgd  * are met:
     40        1.1       cgd  * 1. Redistributions of source code must retain the above copyright
     41        1.1       cgd  *    notice, this list of conditions and the following disclaimer.
     42        1.1       cgd  * 2. Redistributions in binary form must reproduce the above copyright
     43        1.1       cgd  *    notice, this list of conditions and the following disclaimer in the
     44        1.1       cgd  *    documentation and/or other materials provided with the distribution.
     45       1.65       agc  * 3. Neither the name of the University nor the names of its contributors
     46        1.1       cgd  *    may be used to endorse or promote products derived from this software
     47        1.1       cgd  *    without specific prior written permission.
     48        1.1       cgd  *
     49        1.1       cgd  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     50        1.1       cgd  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     51        1.1       cgd  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     52        1.1       cgd  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     53        1.1       cgd  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     54        1.1       cgd  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     55        1.1       cgd  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     56        1.1       cgd  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     57        1.1       cgd  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     58        1.1       cgd  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     59        1.1       cgd  * SUCH DAMAGE.
     60        1.1       cgd  *
     61       1.23      fvdl  *	@(#)kern_proc.c	8.7 (Berkeley) 2/14/95
     62        1.1       cgd  */
     63       1.45     lukem 
     64       1.45     lukem #include <sys/cdefs.h>
     65  1.139.2.5      yamt __KERNEL_RCSID(0, "$NetBSD: kern_proc.c,v 1.139.2.5 2010/08/11 22:54:39 yamt Exp $");
     66       1.48      yamt 
     67  1.139.2.5      yamt #ifdef _KERNEL_OPT
     68       1.48      yamt #include "opt_kstack.h"
     69       1.88      onoe #include "opt_maxuprc.h"
     70  1.139.2.4      yamt #include "opt_dtrace.h"
     71  1.139.2.5      yamt #endif
     72        1.1       cgd 
     73        1.5   mycroft #include <sys/param.h>
     74        1.5   mycroft #include <sys/systm.h>
     75        1.5   mycroft #include <sys/kernel.h>
     76        1.5   mycroft #include <sys/proc.h>
     77       1.28   thorpej #include <sys/resourcevar.h>
     78        1.5   mycroft #include <sys/buf.h>
     79        1.5   mycroft #include <sys/acct.h>
     80        1.5   mycroft #include <sys/wait.h>
     81        1.5   mycroft #include <sys/file.h>
     82        1.8   mycroft #include <ufs/ufs/quota.h>
     83        1.5   mycroft #include <sys/uio.h>
     84       1.24   thorpej #include <sys/pool.h>
     85  1.139.2.2      yamt #include <sys/pset.h>
     86        1.5   mycroft #include <sys/mbuf.h>
     87        1.5   mycroft #include <sys/ioctl.h>
     88        1.5   mycroft #include <sys/tty.h>
     89       1.11       cgd #include <sys/signalvar.h>
     90       1.51  gmcgarry #include <sys/ras.h>
     91  1.139.2.2      yamt #include <sys/sa.h>
     92  1.139.2.2      yamt #include <sys/savar.h>
     93       1.81  junyoung #include <sys/filedesc.h>
      94      1.103       dsl #include <sys/syscall_stats.h>
     95       1.89      elad #include <sys/kauth.h>
     96      1.100        ad #include <sys/sleepq.h>
     97      1.126        ad #include <sys/atomic.h>
     98      1.131        ad #include <sys/kmem.h>
     99  1.139.2.4      yamt #include <sys/dtrace_bsd.h>
    100       1.81  junyoung 
    101       1.81  junyoung #include <uvm/uvm.h>
    102       1.79      yamt #include <uvm/uvm_extern.h>
    103        1.5   mycroft 
    104        1.7       cgd /*
    105       1.10   mycroft  * Other process lists
    106        1.7       cgd  */
    107       1.31   thorpej 
    108       1.10   mycroft struct proclist allproc;
    109       1.32   thorpej struct proclist zombproc;	/* resources have been freed */
    110       1.32   thorpej 
    111      1.136        ad kmutex_t	*proc_lock;
    112       1.33   thorpej 
    113       1.33   thorpej /*
    114       1.72  junyoung  * pid to proc lookup is done by indexing the pid_table array.
    115       1.61       dsl  * Since pid numbers are only allocated when an empty slot
    116       1.61       dsl  * has been found, there is no need to search any lists ever.
    117       1.61       dsl  * (an orphaned pgrp will lock the slot, a session will lock
    118       1.61       dsl  * the pgrp with the same number.)
    119       1.61       dsl  * If the table is too small it is reallocated with twice the
    120       1.61       dsl  * previous size and the entries 'unzipped' into the two halves.
     121       1.61       dsl  * A linked list of free entries is threaded through the pt_proc
     122       1.61       dsl  * field of 'free' items - links are kept odd so they can never be valid pointers.
    123       1.61       dsl  */
    124       1.61       dsl 
    125       1.61       dsl struct pid_table {
    126       1.61       dsl 	struct proc	*pt_proc;
    127       1.61       dsl 	struct pgrp	*pt_pgrp;
    128  1.139.2.5      yamt 	pid_t		pt_pid;
    129       1.72  junyoung };
    130       1.61       dsl #if 1	/* strongly typed cast - should be a noop */
    131       1.84     perry static inline uint p2u(struct proc *p) { return (uint)(uintptr_t)p; }
    132       1.61       dsl #else
    133       1.61       dsl #define p2u(p) ((uint)p)
    134       1.72  junyoung #endif
    135       1.61       dsl #define P_VALID(p) (!(p2u(p) & 1))
    136       1.61       dsl #define P_NEXT(p) (p2u(p) >> 1)
    137       1.61       dsl #define P_FREE(pid) ((struct proc *)(uintptr_t)((pid) << 1 | 1))
    138       1.61       dsl 
    139       1.61       dsl #define INITIAL_PID_TABLE_SIZE	(1 << 5)
    140       1.61       dsl static struct pid_table *pid_table;
    141       1.61       dsl static uint pid_tbl_mask = INITIAL_PID_TABLE_SIZE - 1;
    142       1.61       dsl static uint pid_alloc_lim;	/* max we allocate before growing table */
    143       1.61       dsl static uint pid_alloc_cnt;	/* number of allocated pids */
    144       1.61       dsl 
    145       1.61       dsl /* links through free slots - never empty! */
    146       1.61       dsl static uint next_free_pt, last_free_pt;
    147       1.61       dsl static pid_t pid_max = PID_MAX;		/* largest value we allocate */
    148       1.31   thorpej 
    149       1.81  junyoung /* Components of the first process -- never freed. */
    150      1.123      matt 
    151  1.139.2.2      yamt extern struct emul emul_netbsd;	/* defined in kern_exec.c */
    152      1.123      matt 
    153      1.123      matt struct session session0 = {
    154      1.123      matt 	.s_count = 1,
    155      1.123      matt 	.s_sid = 0,
    156      1.123      matt };
    157      1.123      matt struct pgrp pgrp0 = {
    158      1.123      matt 	.pg_members = LIST_HEAD_INITIALIZER(&pgrp0.pg_members),
    159      1.123      matt 	.pg_session = &session0,
    160      1.123      matt };
    161      1.132        ad filedesc_t filedesc0;
    162      1.123      matt struct cwdinfo cwdi0 = {
    163      1.123      matt 	.cwdi_cmask = CMASK,		/* see cmask below */
    164      1.123      matt 	.cwdi_refcnt = 1,
    165      1.123      matt };
    166  1.139.2.2      yamt struct plimit limit0;
    167       1.81  junyoung struct pstats pstat0;
    168       1.81  junyoung struct vmspace vmspace0;
    169       1.81  junyoung struct sigacts sigacts0;
    170      1.123      matt struct proc proc0 = {
    171      1.123      matt 	.p_lwps = LIST_HEAD_INITIALIZER(&proc0.p_lwps),
    172      1.123      matt 	.p_sigwaiters = LIST_HEAD_INITIALIZER(&proc0.p_sigwaiters),
    173      1.123      matt 	.p_nlwps = 1,
    174      1.123      matt 	.p_nrlwps = 1,
    175      1.123      matt 	.p_nlwpid = 1,		/* must match lwp0.l_lid */
    176      1.123      matt 	.p_pgrp = &pgrp0,
    177      1.123      matt 	.p_comm = "system",
    178      1.123      matt 	/*
    179      1.123      matt 	 * Set P_NOCLDWAIT so that kernel threads are reparented to init(8)
    180      1.123      matt 	 * when they exit.  init(8) can easily wait them out for us.
    181      1.123      matt 	 */
    182      1.123      matt 	.p_flag = PK_SYSTEM | PK_NOCLDWAIT,
    183      1.123      matt 	.p_stat = SACTIVE,
    184      1.123      matt 	.p_nice = NZERO,
    185      1.123      matt 	.p_emul = &emul_netbsd,
    186      1.123      matt 	.p_cwdi = &cwdi0,
    187      1.123      matt 	.p_limit = &limit0,
    188      1.132        ad 	.p_fd = &filedesc0,
    189      1.123      matt 	.p_vmspace = &vmspace0,
    190      1.123      matt 	.p_stats = &pstat0,
    191      1.123      matt 	.p_sigacts = &sigacts0,
    192      1.123      matt };
    193      1.123      matt kauth_cred_t cred0;
    194       1.81  junyoung 
    195       1.81  junyoung int nofile = NOFILE;
    196       1.81  junyoung int maxuprc = MAXUPRC;
    197       1.81  junyoung int cmask = CMASK;
    198       1.81  junyoung 
    199       1.57   thorpej MALLOC_DEFINE(M_EMULDATA, "emuldata", "Per-process emulation data");
    200       1.57   thorpej MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
    201       1.10   mycroft 
    202       1.31   thorpej /*
    203       1.31   thorpej  * The process list descriptors, used during pid allocation and
    204       1.31   thorpej  * by sysctl.  No locking on this data structure is needed since
    205       1.31   thorpej  * it is completely static.
    206       1.31   thorpej  */
    207       1.31   thorpej const struct proclist_desc proclists[] = {
    208       1.31   thorpej 	{ &allproc	},
    209       1.31   thorpej 	{ &zombproc	},
    210       1.31   thorpej 	{ NULL		},
    211       1.31   thorpej };
    212       1.31   thorpej 
    213  1.139.2.2      yamt static struct pgrp *	pg_remove(pid_t);
    214  1.139.2.2      yamt static void		pg_delete(pid_t);
    215  1.139.2.2      yamt static void		orphanpg(struct pgrp *);
    216       1.13  christos 
    217       1.95   thorpej static specificdata_domain_t proc_specificdata_domain;
    218       1.95   thorpej 
    219      1.128        ad static pool_cache_t proc_cache;
    220      1.128        ad 
    221  1.139.2.4      yamt static kauth_listener_t proc_listener;
    222  1.139.2.4      yamt 
    223  1.139.2.4      yamt static int
    224  1.139.2.4      yamt proc_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    225  1.139.2.4      yamt     void *arg0, void *arg1, void *arg2, void *arg3)
    226  1.139.2.4      yamt {
    227  1.139.2.4      yamt 	struct proc *p;
    228  1.139.2.4      yamt 	int result;
    229  1.139.2.4      yamt 
    230  1.139.2.4      yamt 	result = KAUTH_RESULT_DEFER;
    231  1.139.2.4      yamt 	p = arg0;
    232  1.139.2.4      yamt 
    233  1.139.2.4      yamt 	switch (action) {
    234  1.139.2.4      yamt 	case KAUTH_PROCESS_CANSEE: {
    235  1.139.2.4      yamt 		enum kauth_process_req req;
    236  1.139.2.4      yamt 
    237  1.139.2.4      yamt 		req = (enum kauth_process_req)arg1;
    238  1.139.2.4      yamt 
    239  1.139.2.4      yamt 		switch (req) {
    240  1.139.2.4      yamt 		case KAUTH_REQ_PROCESS_CANSEE_ARGS:
    241  1.139.2.4      yamt 		case KAUTH_REQ_PROCESS_CANSEE_ENTRY:
    242  1.139.2.4      yamt 		case KAUTH_REQ_PROCESS_CANSEE_OPENFILES:
    243  1.139.2.4      yamt 			result = KAUTH_RESULT_ALLOW;
    244  1.139.2.4      yamt 
    245  1.139.2.4      yamt 			break;
    246  1.139.2.4      yamt 
    247  1.139.2.4      yamt 		case KAUTH_REQ_PROCESS_CANSEE_ENV:
    248  1.139.2.4      yamt 			if (kauth_cred_getuid(cred) !=
    249  1.139.2.4      yamt 			    kauth_cred_getuid(p->p_cred) ||
    250  1.139.2.4      yamt 			    kauth_cred_getuid(cred) !=
    251  1.139.2.4      yamt 			    kauth_cred_getsvuid(p->p_cred))
    252  1.139.2.4      yamt 				break;
    253  1.139.2.4      yamt 
    254  1.139.2.4      yamt 			result = KAUTH_RESULT_ALLOW;
    255  1.139.2.4      yamt 
    256  1.139.2.4      yamt 			break;
    257  1.139.2.4      yamt 
    258  1.139.2.4      yamt 		default:
    259  1.139.2.4      yamt 			break;
    260  1.139.2.4      yamt 		}
    261  1.139.2.4      yamt 
    262  1.139.2.4      yamt 		break;
    263  1.139.2.4      yamt 		}
    264  1.139.2.4      yamt 
    265  1.139.2.4      yamt 	case KAUTH_PROCESS_FORK: {
    266  1.139.2.4      yamt 		int lnprocs = (int)(unsigned long)arg2;
    267  1.139.2.4      yamt 
    268  1.139.2.4      yamt 		/*
    269  1.139.2.4      yamt 		 * Don't allow a nonprivileged user to use the last few
    270  1.139.2.4      yamt 		 * processes. The variable lnprocs is the current number of
    271  1.139.2.4      yamt 		 * processes, maxproc is the limit.
    272  1.139.2.4      yamt 		 */
    273  1.139.2.4      yamt 		if (__predict_false((lnprocs >= maxproc - 5)))
    274  1.139.2.4      yamt 			break;
    275  1.139.2.4      yamt 
    276  1.139.2.4      yamt 		result = KAUTH_RESULT_ALLOW;
    277  1.139.2.4      yamt 
    278  1.139.2.4      yamt 		break;
    279  1.139.2.4      yamt 		}
    280  1.139.2.4      yamt 
    281  1.139.2.4      yamt 	case KAUTH_PROCESS_CORENAME:
    282  1.139.2.4      yamt 	case KAUTH_PROCESS_STOPFLAG:
    283  1.139.2.4      yamt 		if (proc_uidmatch(cred, p->p_cred) == 0)
    284  1.139.2.4      yamt 			result = KAUTH_RESULT_ALLOW;
    285  1.139.2.4      yamt 
    286  1.139.2.4      yamt 		break;
    287  1.139.2.4      yamt 
    288  1.139.2.4      yamt 	default:
    289  1.139.2.4      yamt 		break;
    290  1.139.2.4      yamt 	}
    291  1.139.2.4      yamt 
    292  1.139.2.4      yamt 	return result;
    293  1.139.2.4      yamt }
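/*
 * Illustrative sketch of how callers elsewhere in the kernel reach this
 * listener through the process scope (the exact call sites are outside
 * this file):
 *
 *	error = kauth_authorize_process(cred, KAUTH_PROCESS_CANSEE, p,
 *	    KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
 *
 * The listener then returns KAUTH_RESULT_ALLOW or KAUTH_RESULT_DEFER
 * depending on the request type and the credentials involved.
 */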
    294  1.139.2.4      yamt 
    295       1.10   mycroft /*
    296       1.10   mycroft  * Initialize global process hashing structures.
    297       1.10   mycroft  */
    298       1.11       cgd void
    299       1.59       dsl procinit(void)
    300        1.7       cgd {
    301       1.31   thorpej 	const struct proclist_desc *pd;
    302  1.139.2.2      yamt 	u_int i;
    303       1.61       dsl #define	LINK_EMPTY ((PID_MAX + INITIAL_PID_TABLE_SIZE) & ~(INITIAL_PID_TABLE_SIZE - 1))
    304       1.31   thorpej 
    305       1.31   thorpej 	for (pd = proclists; pd->pd_list != NULL; pd++)
    306       1.31   thorpej 		LIST_INIT(pd->pd_list);
    307        1.7       cgd 
    308      1.136        ad 	proc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
    309  1.139.2.2      yamt 	pid_table = kmem_alloc(INITIAL_PID_TABLE_SIZE
    310  1.139.2.2      yamt 	    * sizeof(struct pid_table), KM_SLEEP);
    311       1.33   thorpej 
    312       1.61       dsl 	/* Set free list running through table...
    313       1.61       dsl 	   Preset 'use count' above PID_MAX so we allocate pid 1 next. */
    314       1.61       dsl 	for (i = 0; i <= pid_tbl_mask; i++) {
    315       1.61       dsl 		pid_table[i].pt_proc = P_FREE(LINK_EMPTY + i + 1);
    316       1.61       dsl 		pid_table[i].pt_pgrp = 0;
    317  1.139.2.5      yamt 		pid_table[i].pt_pid = 0;
    318       1.61       dsl 	}
    319       1.61       dsl 	/* slot 0 is just grabbed */
    320       1.61       dsl 	next_free_pt = 1;
    321       1.61       dsl 	/* Need to fix last entry. */
    322       1.61       dsl 	last_free_pt = pid_tbl_mask;
    323       1.61       dsl 	pid_table[last_free_pt].pt_proc = P_FREE(LINK_EMPTY);
    324       1.61       dsl 	/* point at which we grow table - to avoid reusing pids too often */
    325       1.61       dsl 	pid_alloc_lim = pid_tbl_mask - 1;
    326       1.61       dsl #undef LINK_EMPTY
    327       1.61       dsl 
    328       1.95   thorpej 	proc_specificdata_domain = specificdata_domain_create();
    329       1.95   thorpej 	KASSERT(proc_specificdata_domain != NULL);
    330      1.128        ad 
    331      1.128        ad 	proc_cache = pool_cache_init(sizeof(struct proc), 0, 0, 0,
    332      1.128        ad 	    "procpl", NULL, IPL_NONE, NULL, NULL, NULL);
    333  1.139.2.4      yamt 
    334  1.139.2.4      yamt 	proc_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
    335  1.139.2.4      yamt 	    proc_listener_cb, NULL);
    336        1.7       cgd }
    337        1.1       cgd 
    338        1.7       cgd /*
    339       1.81  junyoung  * Initialize process 0.
    340       1.81  junyoung  */
    341       1.81  junyoung void
    342       1.81  junyoung proc0_init(void)
    343       1.81  junyoung {
    344       1.81  junyoung 	struct proc *p;
    345       1.81  junyoung 	struct pgrp *pg;
    346       1.81  junyoung 	rlim_t lim;
    347  1.139.2.2      yamt 	int i;
    348       1.81  junyoung 
    349       1.81  junyoung 	p = &proc0;
    350       1.81  junyoung 	pg = &pgrp0;
    351      1.123      matt 
    352      1.127        ad 	mutex_init(&p->p_stmutex, MUTEX_DEFAULT, IPL_HIGH);
    353      1.129        ad 	mutex_init(&p->p_auxlock, MUTEX_DEFAULT, IPL_NONE);
    354      1.137        ad 	p->p_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
    355      1.107        ad 
    356      1.122        ad 	rw_init(&p->p_reflock);
    357      1.100        ad 	cv_init(&p->p_waitcv, "wait");
    358      1.100        ad 	cv_init(&p->p_lwpcv, "lwpwait");
    359      1.100        ad 
    360  1.139.2.5      yamt 	LIST_INSERT_HEAD(&p->p_lwps, &lwp0, l_sibling);
    361      1.100        ad 
    362       1.81  junyoung 	pid_table[0].pt_proc = p;
    363       1.81  junyoung 	LIST_INSERT_HEAD(&allproc, p, p_list);
    364       1.81  junyoung 
    365       1.81  junyoung 	pid_table[0].pt_pgrp = pg;
    366       1.81  junyoung 	LIST_INSERT_HEAD(&pg->pg_members, p, p_pglist);
    367       1.81  junyoung 
    368       1.81  junyoung #ifdef __HAVE_SYSCALL_INTERN
    369       1.81  junyoung 	(*p->p_emul->e_syscall_intern)(p);
    370       1.81  junyoung #endif
    371       1.81  junyoung 
    372       1.81  junyoung 	/* Create credentials. */
    373       1.89      elad 	cred0 = kauth_cred_alloc();
    374       1.89      elad 	p->p_cred = cred0;
    375       1.81  junyoung 
    376       1.81  junyoung 	/* Create the CWD info. */
    377      1.113        ad 	rw_init(&cwdi0.cwdi_lock);
    378       1.81  junyoung 
    379       1.81  junyoung 	/* Create the limits structures. */
    380      1.116       dsl 	mutex_init(&limit0.pl_lock, MUTEX_DEFAULT, IPL_NONE);
    381  1.139.2.2      yamt 	for (i = 0; i < __arraycount(limit0.pl_rlimit); i++)
    382  1.139.2.2      yamt 		limit0.pl_rlimit[i].rlim_cur =
    383  1.139.2.2      yamt 		    limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY;
    384       1.81  junyoung 
    385       1.81  junyoung 	limit0.pl_rlimit[RLIMIT_NOFILE].rlim_max = maxfiles;
    386       1.81  junyoung 	limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur =
    387       1.81  junyoung 	    maxfiles < nofile ? maxfiles : nofile;
    388       1.81  junyoung 
    389       1.81  junyoung 	limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
    390       1.81  junyoung 	limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur =
    391       1.81  junyoung 	    maxproc < maxuprc ? maxproc : maxuprc;
    392       1.81  junyoung 
    393  1.139.2.4      yamt 	lim = MIN(VM_MAXUSER_ADDRESS, ctob((rlim_t)uvmexp.free));
    394       1.81  junyoung 	limit0.pl_rlimit[RLIMIT_RSS].rlim_max = lim;
    395       1.81  junyoung 	limit0.pl_rlimit[RLIMIT_MEMLOCK].rlim_max = lim;
    396       1.81  junyoung 	limit0.pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = lim / 3;
    397  1.139.2.2      yamt 	limit0.pl_corename = defcorename;
    398  1.139.2.2      yamt 	limit0.pl_refcnt = 1;
    399  1.139.2.2      yamt 	limit0.pl_sv_limit = NULL;
    400       1.81  junyoung 
    401       1.81  junyoung 	/* Configure virtual memory system, set vm rlimits. */
    402       1.81  junyoung 	uvm_init_limits(p);
    403       1.81  junyoung 
    404       1.81  junyoung 	/* Initialize file descriptor table for proc0. */
    405      1.132        ad 	fd_init(&filedesc0);
    406       1.81  junyoung 
    407       1.81  junyoung 	/*
    408       1.81  junyoung 	 * Initialize proc0's vmspace, which uses the kernel pmap.
    409       1.81  junyoung 	 * All kernel processes (which never have user space mappings)
    410       1.81  junyoung 	 * share proc0's vmspace, and thus, the kernel pmap.
    411       1.81  junyoung 	 */
    412       1.81  junyoung 	uvmspace_init(&vmspace0, pmap_kernel(), round_page(VM_MIN_ADDRESS),
    413       1.81  junyoung 	    trunc_page(VM_MAX_ADDRESS));
    414       1.81  junyoung 
    415      1.127        ad 	/* Initialize signal state for proc0. XXX IPL_SCHED */
    416      1.127        ad 	mutex_init(&p->p_sigacts->sa_mutex, MUTEX_DEFAULT, IPL_SCHED);
    417       1.81  junyoung 	siginit(p);
    418       1.96  christos 
    419       1.96  christos 	proc_initspecific(p);
    420  1.139.2.5      yamt 	kdtrace_proc_ctor(NULL, p);
    421       1.81  junyoung }
    422       1.81  junyoung 
    423       1.81  junyoung /*
    424  1.139.2.2      yamt  * Session reference counting.
    425  1.139.2.2      yamt  */
    426  1.139.2.2      yamt 
    427  1.139.2.2      yamt void
    428  1.139.2.2      yamt proc_sesshold(struct session *ss)
    429  1.139.2.2      yamt {
    430  1.139.2.2      yamt 
    431  1.139.2.2      yamt 	KASSERT(mutex_owned(proc_lock));
    432  1.139.2.2      yamt 	ss->s_count++;
    433  1.139.2.2      yamt }
    434  1.139.2.2      yamt 
    435  1.139.2.2      yamt void
    436  1.139.2.2      yamt proc_sessrele(struct session *ss)
    437  1.139.2.2      yamt {
    438  1.139.2.2      yamt 
    439  1.139.2.2      yamt 	KASSERT(mutex_owned(proc_lock));
    440  1.139.2.2      yamt 	/*
    441  1.139.2.2      yamt 	 * We keep the pgrp with the same id as the session in order to
    442  1.139.2.2      yamt 	 * stop a process being given the same pid.  Since the pgrp holds
    443  1.139.2.2      yamt 	 * a reference to the session, it must be a 'zombie' pgrp by now.
    444  1.139.2.2      yamt 	 */
    445  1.139.2.2      yamt 	if (--ss->s_count == 0) {
    446  1.139.2.2      yamt 		struct pgrp *pg;
    447  1.139.2.2      yamt 
    448  1.139.2.2      yamt 		pg = pg_remove(ss->s_sid);
    449  1.139.2.2      yamt 		mutex_exit(proc_lock);
    450  1.139.2.2      yamt 
    451  1.139.2.2      yamt 		kmem_free(pg, sizeof(struct pgrp));
    452  1.139.2.2      yamt 		kmem_free(ss, sizeof(struct session));
    453  1.139.2.2      yamt 	} else {
    454  1.139.2.2      yamt 		mutex_exit(proc_lock);
    455  1.139.2.2      yamt 	}
    456  1.139.2.2      yamt }
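/*
 * Usage sketch (hypothetical caller): both routines expect proc_lock to
 * be held, and proc_sessrele() always releases it, whether or not this
 * was the last reference:
 *
 *	mutex_enter(proc_lock);
 *	proc_sesshold(ss);
 *	mutex_exit(proc_lock);
 *	...
 *	mutex_enter(proc_lock);
 *	proc_sessrele(ss);	(also releases proc_lock)
 */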
    457  1.139.2.2      yamt 
    458  1.139.2.2      yamt /*
    459       1.74  junyoung  * Check that the specified process group is in the session of the
    460       1.60       dsl  * specified process.
     461       1.60       dsl  * Treats negative ids as process ids.
    462       1.60       dsl  * Used to validate TIOCSPGRP requests.
    463       1.60       dsl  */
    464       1.60       dsl int
    465       1.60       dsl pgid_in_session(struct proc *p, pid_t pg_id)
    466       1.60       dsl {
    467       1.60       dsl 	struct pgrp *pgrp;
    468      1.101       dsl 	struct session *session;
    469      1.107        ad 	int error;
    470      1.101       dsl 
    471      1.136        ad 	mutex_enter(proc_lock);
    472       1.60       dsl 	if (pg_id < 0) {
    473  1.139.2.5      yamt 		struct proc *p1 = proc_find(-pg_id);
    474  1.139.2.5      yamt 		if (p1 == NULL) {
    475  1.139.2.5      yamt 			error = EINVAL;
    476  1.139.2.5      yamt 			goto fail;
    477  1.139.2.5      yamt 		}
    478       1.60       dsl 		pgrp = p1->p_pgrp;
    479       1.60       dsl 	} else {
    480  1.139.2.5      yamt 		pgrp = pgrp_find(pg_id);
    481  1.139.2.5      yamt 		if (pgrp == NULL) {
    482  1.139.2.5      yamt 			error = EINVAL;
    483  1.139.2.5      yamt 			goto fail;
    484  1.139.2.5      yamt 		}
    485       1.60       dsl 	}
    486      1.101       dsl 	session = pgrp->pg_session;
    487  1.139.2.5      yamt 	error = (session != p->p_pgrp->pg_session) ? EPERM : 0;
    488  1.139.2.5      yamt fail:
    489      1.136        ad 	mutex_exit(proc_lock);
    490      1.107        ad 	return error;
    491        1.7       cgd }
    492        1.4    andrew 
    493        1.1       cgd /*
    494  1.139.2.2      yamt  * p_inferior: is p an inferior of q?
    495        1.1       cgd  */
    496  1.139.2.2      yamt static inline bool
    497  1.139.2.2      yamt p_inferior(struct proc *p, struct proc *q)
    498        1.1       cgd {
    499        1.1       cgd 
    500  1.139.2.2      yamt 	KASSERT(mutex_owned(proc_lock));
    501  1.139.2.2      yamt 
    502       1.41  sommerfe 	for (; p != q; p = p->p_pptr)
    503        1.1       cgd 		if (p->p_pid == 0)
    504  1.139.2.2      yamt 			return false;
    505  1.139.2.2      yamt 	return true;
    506        1.1       cgd }
    507        1.1       cgd 
    508        1.1       cgd /*
    509  1.139.2.5      yamt  * proc_find: locate a process by the ID.
    510  1.139.2.5      yamt  *
    511  1.139.2.5      yamt  * => Must be called with proc_lock held.
    512        1.1       cgd  */
    513  1.139.2.5      yamt proc_t *
    514  1.139.2.5      yamt proc_find_raw(pid_t pid)
    515        1.1       cgd {
    516  1.139.2.5      yamt 	struct pid_table *pt;
    517  1.139.2.5      yamt 	proc_t *p;
    518        1.1       cgd 
    519  1.139.2.5      yamt 	KASSERT(mutex_owned(proc_lock));
    520  1.139.2.5      yamt 	pt = &pid_table[pid & pid_tbl_mask];
    521  1.139.2.5      yamt 	p = pt->pt_proc;
    522  1.139.2.5      yamt 	if (__predict_false(!P_VALID(p) || pt->pt_pid != pid)) {
    523  1.139.2.5      yamt 		return NULL;
    524  1.139.2.5      yamt 	}
    525  1.139.2.5      yamt 	return p;
    526  1.139.2.5      yamt }
    527  1.139.2.5      yamt 
    528  1.139.2.5      yamt proc_t *
    529  1.139.2.5      yamt proc_find(pid_t pid)
    530  1.139.2.5      yamt {
    531  1.139.2.5      yamt 	proc_t *p;
    532      1.100        ad 
    533  1.139.2.5      yamt 	p = proc_find_raw(pid);
    534  1.139.2.5      yamt 	if (__predict_false(p == NULL)) {
    535  1.139.2.5      yamt 		return NULL;
    536  1.139.2.5      yamt 	}
    537      1.100        ad 
    538  1.139.2.5      yamt 	/*
    539  1.139.2.5      yamt 	 * Only allow live processes to be found by PID.
    540  1.139.2.5      yamt 	 * XXX: p_stat might change, since unlocked.
    541  1.139.2.5      yamt 	 */
    542  1.139.2.5      yamt 	if (__predict_true(p->p_stat == SACTIVE || p->p_stat == SSTOP)) {
    543       1.68       dsl 		return p;
    544       1.68       dsl 	}
    545       1.68       dsl 	return NULL;
    546        1.1       cgd }
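/*
 * Typical lookup pattern (illustrative sketch): proc_lock must be held
 * for the call and, unless some other reference is taken, for as long
 * as the returned pointer is used:
 *
 *	mutex_enter(proc_lock);
 *	if ((p = proc_find(pid)) != NULL) {
 *		...use p while proc_lock is held...
 *	}
 *	mutex_exit(proc_lock);
 */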
    547        1.1       cgd 
    548        1.1       cgd /*
    549  1.139.2.5      yamt  * pgrp_find: locate a process group by the ID.
    550  1.139.2.5      yamt  *
    551  1.139.2.5      yamt  * => Must be called with proc_lock held.
    552        1.1       cgd  */
    553        1.1       cgd struct pgrp *
    554  1.139.2.5      yamt pgrp_find(pid_t pgid)
    555        1.1       cgd {
    556       1.68       dsl 	struct pgrp *pg;
    557        1.1       cgd 
    558  1.139.2.5      yamt 	KASSERT(mutex_owned(proc_lock));
    559  1.139.2.5      yamt 
    560       1.68       dsl 	pg = pid_table[pgid & pid_tbl_mask].pt_pgrp;
    561  1.139.2.5      yamt 
    562       1.61       dsl 	/*
    563  1.139.2.5      yamt 	 * Cannot look up a process group that only exists because the
    564  1.139.2.5      yamt 	 * session has not died yet (traditional).
    565       1.61       dsl 	 */
    566       1.68       dsl 	if (pg == NULL || pg->pg_id != pgid || LIST_EMPTY(&pg->pg_members)) {
    567       1.68       dsl 		return NULL;
    568       1.68       dsl 	}
    569       1.68       dsl 	return pg;
    570        1.1       cgd }
    571        1.1       cgd 
    572       1.61       dsl static void
    573       1.61       dsl expand_pid_table(void)
    574        1.1       cgd {
    575  1.139.2.2      yamt 	size_t pt_size, tsz;
    576       1.61       dsl 	struct pid_table *n_pt, *new_pt;
    577       1.61       dsl 	struct proc *proc;
    578       1.61       dsl 	struct pgrp *pgrp;
    579  1.139.2.5      yamt 	pid_t pid, rpid;
    580  1.139.2.2      yamt 	u_int i;
    581  1.139.2.5      yamt 	uint new_pt_mask;
    582        1.1       cgd 
    583  1.139.2.2      yamt 	pt_size = pid_tbl_mask + 1;
    584  1.139.2.2      yamt 	tsz = pt_size * 2 * sizeof(struct pid_table);
    585  1.139.2.2      yamt 	new_pt = kmem_alloc(tsz, KM_SLEEP);
    586  1.139.2.5      yamt 	new_pt_mask = pt_size * 2 - 1;
    587       1.61       dsl 
    588      1.136        ad 	mutex_enter(proc_lock);
    589       1.61       dsl 	if (pt_size != pid_tbl_mask + 1) {
    590       1.61       dsl 		/* Another process beat us to it... */
    591      1.136        ad 		mutex_exit(proc_lock);
    592  1.139.2.2      yamt 		kmem_free(new_pt, tsz);
    593       1.61       dsl 		return;
    594       1.61       dsl 	}
    595       1.72  junyoung 
    596       1.61       dsl 	/*
    597       1.61       dsl 	 * Copy entries from old table into new one.
     598       1.61       dsl 	 * If the 'pid' is 'odd' we need to place it in the upper half,
     599       1.61       dsl 	 * even pids go to the lower half.
    600       1.61       dsl 	 * Free items stay in the low half so we don't have to
    601       1.61       dsl 	 * fixup the reference to them.
    602       1.61       dsl 	 * We stuff free items on the front of the freelist
    603       1.61       dsl 	 * because we can't write to unmodified entries.
    604       1.74  junyoung 	 * Processing the table backwards maintains a semblance
    605  1.139.2.5      yamt 	 * of issuing pid numbers that increase with time.
    606       1.61       dsl 	 */
    607       1.61       dsl 	i = pt_size - 1;
    608       1.61       dsl 	n_pt = new_pt + i;
    609       1.61       dsl 	for (; ; i--, n_pt--) {
    610       1.61       dsl 		proc = pid_table[i].pt_proc;
    611       1.61       dsl 		pgrp = pid_table[i].pt_pgrp;
    612       1.61       dsl 		if (!P_VALID(proc)) {
    613       1.61       dsl 			/* Up 'use count' so that link is valid */
    614       1.61       dsl 			pid = (P_NEXT(proc) + pt_size) & ~pt_size;
    615  1.139.2.5      yamt 			rpid = 0;
    616       1.61       dsl 			proc = P_FREE(pid);
    617       1.61       dsl 			if (pgrp)
    618       1.61       dsl 				pid = pgrp->pg_id;
    619  1.139.2.5      yamt 		} else {
    620  1.139.2.5      yamt 			pid = pid_table[i].pt_pid;
    621  1.139.2.5      yamt 			rpid = pid;
    622  1.139.2.5      yamt 		}
    623       1.72  junyoung 
    624       1.61       dsl 		/* Save entry in appropriate half of table */
    625       1.61       dsl 		n_pt[pid & pt_size].pt_proc = proc;
    626       1.61       dsl 		n_pt[pid & pt_size].pt_pgrp = pgrp;
    627  1.139.2.5      yamt 		n_pt[pid & pt_size].pt_pid = rpid;
    628       1.61       dsl 
    629       1.61       dsl 		/* Put other piece on start of free list */
    630       1.61       dsl 		pid = (pid ^ pt_size) & ~pid_tbl_mask;
    631       1.61       dsl 		n_pt[pid & pt_size].pt_proc =
    632  1.139.2.5      yamt 			P_FREE((pid & ~pt_size) | next_free_pt);
    633       1.61       dsl 		n_pt[pid & pt_size].pt_pgrp = 0;
    634  1.139.2.5      yamt 		n_pt[pid & pt_size].pt_pid = 0;
    635  1.139.2.5      yamt 
    636       1.61       dsl 		next_free_pt = i | (pid & pt_size);
    637       1.61       dsl 		if (i == 0)
    638       1.61       dsl 			break;
    639       1.61       dsl 	}
    640       1.61       dsl 
    641  1.139.2.2      yamt 	/* Save old table size and switch tables */
    642  1.139.2.2      yamt 	tsz = pt_size * sizeof(struct pid_table);
    643       1.61       dsl 	n_pt = pid_table;
    644       1.61       dsl 	pid_table = new_pt;
    645  1.139.2.5      yamt 	pid_tbl_mask = new_pt_mask;
    646       1.61       dsl 
    647       1.61       dsl 	/*
    648       1.61       dsl 	 * pid_max starts as PID_MAX (= 30000), once we have 16384
    649       1.61       dsl 	 * allocated pids we need it to be larger!
    650       1.61       dsl 	 */
    651       1.61       dsl 	if (pid_tbl_mask > PID_MAX) {
    652       1.61       dsl 		pid_max = pid_tbl_mask * 2 + 1;
    653       1.61       dsl 		pid_alloc_lim |= pid_alloc_lim << 1;
    654       1.61       dsl 	} else
    655       1.61       dsl 		pid_alloc_lim <<= 1;	/* doubles number of free slots... */
    656       1.61       dsl 
    657      1.136        ad 	mutex_exit(proc_lock);
    658  1.139.2.2      yamt 	kmem_free(n_pt, tsz);
    659       1.61       dsl }
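/*
 * Unzip example: when the table grows from 32 to 64 slots, each entry
 * is placed according to bit 5 of its pid (pid & pt_size).  A live pid
 * 37 moves from old slot 5 to new slot 37, while a live pid 5 stays at
 * slot 5; the unused twin slot in the other half is pushed onto the
 * front of the new free list.  Free entries always stay in the lower
 * half, so the links already stored in them remain valid.
 */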
    660       1.61       dsl 
    661       1.61       dsl struct proc *
    662       1.61       dsl proc_alloc(void)
    663       1.61       dsl {
    664       1.61       dsl 	struct proc *p;
    665       1.61       dsl 
    666      1.128        ad 	p = pool_cache_get(proc_cache, PR_WAITOK);
    667       1.61       dsl 	p->p_stat = SIDL;			/* protect against others */
    668       1.96  christos 	proc_initspecific(p);
    669  1.139.2.5      yamt 	kdtrace_proc_ctor(NULL, p);
    670  1.139.2.5      yamt 	p->p_pid = -1;
    671  1.139.2.5      yamt 	proc_alloc_pid(p);
    672  1.139.2.5      yamt 	return p;
    673  1.139.2.5      yamt }
    674  1.139.2.5      yamt 
    675  1.139.2.5      yamt pid_t
    676  1.139.2.5      yamt proc_alloc_pid(struct proc *p)
    677  1.139.2.5      yamt {
    678  1.139.2.5      yamt 	struct pid_table *pt;
    679  1.139.2.5      yamt 	pid_t pid;
    680  1.139.2.5      yamt 	int nxt;
    681       1.61       dsl 
    682       1.61       dsl 	for (;;expand_pid_table()) {
    683       1.61       dsl 		if (__predict_false(pid_alloc_cnt >= pid_alloc_lim))
    684       1.61       dsl 			/* ensure pids cycle through 2000+ values */
    685       1.61       dsl 			continue;
    686      1.136        ad 		mutex_enter(proc_lock);
    687       1.61       dsl 		pt = &pid_table[next_free_pt];
    688        1.1       cgd #ifdef DIAGNOSTIC
    689       1.63  christos 		if (__predict_false(P_VALID(pt->pt_proc) || pt->pt_pgrp))
    690       1.61       dsl 			panic("proc_alloc: slot busy");
    691        1.1       cgd #endif
    692       1.61       dsl 		nxt = P_NEXT(pt->pt_proc);
    693       1.61       dsl 		if (nxt & pid_tbl_mask)
    694       1.61       dsl 			break;
    695       1.61       dsl 		/* Table full - expand (NB last entry not used....) */
    696      1.136        ad 		mutex_exit(proc_lock);
    697       1.61       dsl 	}
    698       1.61       dsl 
    699       1.61       dsl 	/* pid is 'saved use count' + 'size' + entry */
    700       1.61       dsl 	pid = (nxt & ~pid_tbl_mask) + pid_tbl_mask + 1 + next_free_pt;
    701       1.61       dsl 	if ((uint)pid > (uint)pid_max)
    702       1.61       dsl 		pid &= pid_tbl_mask;
    703       1.61       dsl 	next_free_pt = nxt & pid_tbl_mask;
    704       1.61       dsl 
    705       1.61       dsl 	/* Grab table slot */
    706       1.61       dsl 	pt->pt_proc = p;
    707  1.139.2.4      yamt 
    708  1.139.2.5      yamt 	KASSERT(pt->pt_pid == 0);
    709  1.139.2.5      yamt 	pt->pt_pid = pid;
    710  1.139.2.5      yamt 	if (p->p_pid == -1) {
    711  1.139.2.5      yamt 		p->p_pid = pid;
    712  1.139.2.5      yamt 	}
    713  1.139.2.5      yamt 	pid_alloc_cnt++;
    714      1.136        ad 	mutex_exit(proc_lock);
    715       1.61       dsl 
    716  1.139.2.5      yamt 	return pid;
    717       1.61       dsl }
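/*
 * Worked example: with a 32-entry table (pid_tbl_mask == 31), a slot 3
 * whose saved use count is 64 yields pid 64 + 32 + 3 == 99.  Freeing
 * that pid stores the count 96 back in the slot, so the next pid
 * allocated from slot 3 is 96 + 32 + 3 == 131 - the same slot never
 * hands out the same pid again until the count wraps past pid_max.
 */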
    718       1.61       dsl 
    719       1.61       dsl /*
    720      1.118        ad  * Free a process id - called from proc_free (in kern_exit.c)
    721      1.100        ad  *
    722      1.136        ad  * Called with the proc_lock held.
    723       1.61       dsl  */
    724       1.61       dsl void
    725  1.139.2.5      yamt proc_free_pid(pid_t pid)
    726       1.61       dsl {
    727       1.61       dsl 	struct pid_table *pt;
    728       1.61       dsl 
    729      1.136        ad 	KASSERT(mutex_owned(proc_lock));
    730       1.61       dsl 
    731       1.61       dsl 	pt = &pid_table[pid & pid_tbl_mask];
    732  1.139.2.5      yamt 
    733       1.61       dsl 	/* save pid use count in slot */
    734       1.61       dsl 	pt->pt_proc = P_FREE(pid & ~pid_tbl_mask);
    735  1.139.2.5      yamt 	KASSERT(pt->pt_pid == pid);
    736  1.139.2.5      yamt 	pt->pt_pid = 0;
    737       1.61       dsl 
    738       1.61       dsl 	if (pt->pt_pgrp == NULL) {
    739       1.61       dsl 		/* link last freed entry onto ours */
    740       1.61       dsl 		pid &= pid_tbl_mask;
    741       1.61       dsl 		pt = &pid_table[last_free_pt];
    742       1.61       dsl 		pt->pt_proc = P_FREE(P_NEXT(pt->pt_proc) | pid);
    743  1.139.2.5      yamt 		pt->pt_pid = 0;
    744       1.61       dsl 		last_free_pt = pid;
    745       1.61       dsl 		pid_alloc_cnt--;
    746       1.61       dsl 	}
    747       1.61       dsl 
    748      1.126        ad 	atomic_dec_uint(&nprocs);
    749       1.61       dsl }
    750       1.61       dsl 
    751      1.128        ad void
    752      1.128        ad proc_free_mem(struct proc *p)
    753      1.128        ad {
    754      1.128        ad 
    755  1.139.2.4      yamt 	kdtrace_proc_dtor(NULL, p);
    756      1.128        ad 	pool_cache_put(proc_cache, p);
    757      1.128        ad }
    758      1.128        ad 
    759       1.61       dsl /*
    760  1.139.2.2      yamt  * proc_enterpgrp: move p to a new or existing process group (and session).
    761       1.61       dsl  *
    762       1.61       dsl  * If we are creating a new pgrp, the pgid should equal
    763       1.72  junyoung  * the calling process' pid.
     764       1.61       dsl  * It is only valid to enter a process group that is in the session
     765       1.61       dsl  * of the process.
     766       1.61       dsl  * Also, mksess should only be set if we are creating a process group.
    767       1.61       dsl  *
    768      1.134      yamt  * Only called from sys_setsid and sys_setpgid.
    769       1.61       dsl  */
    770       1.61       dsl int
    771  1.139.2.2      yamt proc_enterpgrp(struct proc *curp, pid_t pid, pid_t pgid, bool mksess)
    772       1.61       dsl {
    773       1.61       dsl 	struct pgrp *new_pgrp, *pgrp;
    774       1.61       dsl 	struct session *sess;
    775      1.100        ad 	struct proc *p;
    776       1.61       dsl 	int rval;
    777       1.61       dsl 	pid_t pg_id = NO_PGID;
    778       1.61       dsl 
    779  1.139.2.2      yamt 	sess = mksess ? kmem_alloc(sizeof(*sess), KM_SLEEP) : NULL;
    780       1.61       dsl 
    781      1.107        ad 	/* Allocate data areas we might need before doing any validity checks */
    782      1.136        ad 	mutex_enter(proc_lock);		/* Because pid_table might change */
    783      1.107        ad 	if (pid_table[pgid & pid_tbl_mask].pt_pgrp == 0) {
    784      1.136        ad 		mutex_exit(proc_lock);
    785      1.131        ad 		new_pgrp = kmem_alloc(sizeof(*new_pgrp), KM_SLEEP);
    786      1.136        ad 		mutex_enter(proc_lock);
    787      1.107        ad 	} else
    788      1.107        ad 		new_pgrp = NULL;
    789       1.61       dsl 	rval = EPERM;	/* most common error (to save typing) */
    790       1.61       dsl 
    791       1.61       dsl 	/* Check pgrp exists or can be created */
    792       1.61       dsl 	pgrp = pid_table[pgid & pid_tbl_mask].pt_pgrp;
    793       1.61       dsl 	if (pgrp != NULL && pgrp->pg_id != pgid)
    794       1.61       dsl 		goto done;
    795       1.61       dsl 
    796       1.61       dsl 	/* Can only set another process under restricted circumstances. */
    797      1.100        ad 	if (pid != curp->p_pid) {
    798  1.139.2.5      yamt 		/* Must exist and be one of our children... */
    799  1.139.2.5      yamt 		p = proc_find(pid);
    800  1.139.2.5      yamt 		if (p == NULL || !p_inferior(p, curp)) {
    801       1.61       dsl 			rval = ESRCH;
    802       1.61       dsl 			goto done;
    803       1.61       dsl 		}
    804       1.61       dsl 		/* ... in the same session... */
    805       1.61       dsl 		if (sess != NULL || p->p_session != curp->p_session)
    806       1.61       dsl 			goto done;
    807       1.61       dsl 		/* ... existing pgid must be in same session ... */
    808       1.61       dsl 		if (pgrp != NULL && pgrp->pg_session != p->p_session)
    809       1.61       dsl 			goto done;
    810       1.61       dsl 		/* ... and not done an exec. */
    811      1.102     pavel 		if (p->p_flag & PK_EXEC) {
    812       1.61       dsl 			rval = EACCES;
    813       1.61       dsl 			goto done;
    814       1.49     enami 		}
    815      1.100        ad 	} else {
    816      1.100        ad 		/* ... setsid() cannot re-enter a pgrp */
    817      1.100        ad 		if (mksess && (curp->p_pgid == curp->p_pid ||
    818  1.139.2.5      yamt 		    pgrp_find(curp->p_pid)))
    819      1.100        ad 			goto done;
    820      1.100        ad 		p = curp;
    821       1.61       dsl 	}
    822        1.1       cgd 
    823       1.61       dsl 	/* Changing the process group/session of a session
    824       1.61       dsl 	   leader is definitely off limits. */
    825       1.61       dsl 	if (SESS_LEADER(p)) {
    826       1.61       dsl 		if (sess == NULL && p->p_pgrp == pgrp)
    827       1.61       dsl 			/* unless it's a definite noop */
    828       1.61       dsl 			rval = 0;
    829       1.61       dsl 		goto done;
    830       1.61       dsl 	}
    831       1.61       dsl 
    832       1.61       dsl 	/* Can only create a process group with id of process */
    833       1.61       dsl 	if (pgrp == NULL && pgid != pid)
    834       1.61       dsl 		goto done;
    835       1.61       dsl 
    836       1.61       dsl 	/* Can only create a session if creating pgrp */
    837       1.61       dsl 	if (sess != NULL && pgrp != NULL)
    838       1.61       dsl 		goto done;
    839       1.61       dsl 
    840       1.61       dsl 	/* Check we allocated memory for a pgrp... */
    841       1.61       dsl 	if (pgrp == NULL && new_pgrp == NULL)
    842       1.61       dsl 		goto done;
    843       1.61       dsl 
    844       1.61       dsl 	/* Don't attach to 'zombie' pgrp */
    845       1.61       dsl 	if (pgrp != NULL && LIST_EMPTY(&pgrp->pg_members))
    846       1.61       dsl 		goto done;
    847       1.61       dsl 
    848       1.61       dsl 	/* Expect to succeed now */
    849       1.61       dsl 	rval = 0;
    850       1.61       dsl 
    851       1.61       dsl 	if (pgrp == p->p_pgrp)
    852       1.61       dsl 		/* nothing to do */
    853       1.61       dsl 		goto done;
    854       1.61       dsl 
    855       1.61       dsl 	/* Ok all setup, link up required structures */
    856      1.100        ad 
    857       1.61       dsl 	if (pgrp == NULL) {
    858       1.61       dsl 		pgrp = new_pgrp;
    859  1.139.2.1      yamt 		new_pgrp = NULL;
    860       1.61       dsl 		if (sess != NULL) {
    861       1.21   thorpej 			sess->s_sid = p->p_pid;
    862        1.1       cgd 			sess->s_leader = p;
    863        1.1       cgd 			sess->s_count = 1;
    864        1.1       cgd 			sess->s_ttyvp = NULL;
    865        1.1       cgd 			sess->s_ttyp = NULL;
    866       1.58       dsl 			sess->s_flags = p->p_session->s_flags & ~S_LOGIN_SET;
    867       1.25     perry 			memcpy(sess->s_login, p->p_session->s_login,
    868        1.1       cgd 			    sizeof(sess->s_login));
    869      1.100        ad 			p->p_lflag &= ~PL_CONTROLT;
    870        1.1       cgd 		} else {
    871       1.61       dsl 			sess = p->p_pgrp->pg_session;
    872  1.139.2.2      yamt 			proc_sesshold(sess);
    873        1.1       cgd 		}
    874       1.61       dsl 		pgrp->pg_session = sess;
    875  1.139.2.1      yamt 		sess = NULL;
    876       1.61       dsl 
    877        1.1       cgd 		pgrp->pg_id = pgid;
    878       1.10   mycroft 		LIST_INIT(&pgrp->pg_members);
    879       1.61       dsl #ifdef DIAGNOSTIC
    880       1.63  christos 		if (__predict_false(pid_table[pgid & pid_tbl_mask].pt_pgrp))
    881       1.61       dsl 			panic("enterpgrp: pgrp table slot in use");
    882       1.63  christos 		if (__predict_false(mksess && p != curp))
    883       1.63  christos 			panic("enterpgrp: mksession and p != curproc");
    884       1.61       dsl #endif
    885       1.61       dsl 		pid_table[pgid & pid_tbl_mask].pt_pgrp = pgrp;
    886        1.1       cgd 		pgrp->pg_jobc = 0;
    887      1.136        ad 	}
    888        1.1       cgd 
    889        1.1       cgd 	/*
    890        1.1       cgd 	 * Adjust eligibility of affected pgrps to participate in job control.
    891        1.1       cgd 	 * Increment eligibility counts before decrementing, otherwise we
    892        1.1       cgd 	 * could reach 0 spuriously during the first call.
    893        1.1       cgd 	 */
    894        1.1       cgd 	fixjobc(p, pgrp, 1);
    895        1.1       cgd 	fixjobc(p, p->p_pgrp, 0);
    896        1.1       cgd 
    897      1.139        ad 	/* Interlock with ttread(). */
    898      1.139        ad 	mutex_spin_enter(&tty_lock);
    899      1.139        ad 
    900      1.100        ad 	/* Move process to requested group. */
    901       1.10   mycroft 	LIST_REMOVE(p, p_pglist);
    902       1.52      matt 	if (LIST_EMPTY(&p->p_pgrp->pg_members))
    903       1.61       dsl 		/* defer delete until we've dumped the lock */
    904       1.61       dsl 		pg_id = p->p_pgrp->pg_id;
    905        1.1       cgd 	p->p_pgrp = pgrp;
    906       1.10   mycroft 	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
    907      1.100        ad 
    908      1.100        ad 	/* Done with the swap; we can release the tty mutex. */
    909      1.128        ad 	mutex_spin_exit(&tty_lock);
    910      1.128        ad 
    911       1.61       dsl     done:
    912  1.139.2.2      yamt 	if (pg_id != NO_PGID) {
    913  1.139.2.2      yamt 		/* Releases proc_lock. */
    914      1.100        ad 		pg_delete(pg_id);
    915  1.139.2.2      yamt 	} else {
    916  1.139.2.2      yamt 		mutex_exit(proc_lock);
    917  1.139.2.2      yamt 	}
    918       1.61       dsl 	if (sess != NULL)
    919      1.131        ad 		kmem_free(sess, sizeof(*sess));
    920       1.61       dsl 	if (new_pgrp != NULL)
    921      1.131        ad 		kmem_free(new_pgrp, sizeof(*new_pgrp));
    922       1.63  christos #ifdef DEBUG_PGRP
    923       1.63  christos 	if (__predict_false(rval))
    924       1.61       dsl 		printf("enterpgrp(%d,%d,%d), curproc %d, rval %d\n",
    925       1.61       dsl 			pid, pgid, mksess, curp->p_pid, rval);
    926       1.61       dsl #endif
    927       1.61       dsl 	return rval;
    928        1.1       cgd }
    929        1.1       cgd 
    930        1.1       cgd /*
    931  1.139.2.2      yamt  * proc_leavepgrp: remove a process from its process group.
    932  1.139.2.2      yamt  *  => must be called with the proc_lock held, which will be released;
    933        1.1       cgd  */
    934      1.100        ad void
    935  1.139.2.2      yamt proc_leavepgrp(struct proc *p)
    936        1.1       cgd {
    937       1.61       dsl 	struct pgrp *pgrp;
    938        1.1       cgd 
    939      1.136        ad 	KASSERT(mutex_owned(proc_lock));
    940      1.100        ad 
    941      1.139        ad 	/* Interlock with ttread() */
    942      1.128        ad 	mutex_spin_enter(&tty_lock);
    943       1.61       dsl 	pgrp = p->p_pgrp;
    944       1.10   mycroft 	LIST_REMOVE(p, p_pglist);
    945       1.94        ad 	p->p_pgrp = NULL;
    946      1.128        ad 	mutex_spin_exit(&tty_lock);
    947      1.100        ad 
    948  1.139.2.2      yamt 	if (LIST_EMPTY(&pgrp->pg_members)) {
    949  1.139.2.2      yamt 		/* Releases proc_lock. */
    950      1.100        ad 		pg_delete(pgrp->pg_id);
    951  1.139.2.2      yamt 	} else {
    952  1.139.2.2      yamt 		mutex_exit(proc_lock);
    953  1.139.2.2      yamt 	}
    954       1.61       dsl }
    955       1.61       dsl 
    956      1.100        ad /*
    957  1.139.2.2      yamt  * pg_remove: remove a process group from the table.
    958  1.139.2.2      yamt  *  => must be called with the proc_lock held;
    959  1.139.2.2      yamt  *  => returns process group to free;
    960      1.100        ad  */
    961  1.139.2.2      yamt static struct pgrp *
    962  1.139.2.2      yamt pg_remove(pid_t pg_id)
    963       1.61       dsl {
    964       1.61       dsl 	struct pgrp *pgrp;
    965       1.61       dsl 	struct pid_table *pt;
    966       1.61       dsl 
    967      1.136        ad 	KASSERT(mutex_owned(proc_lock));
    968      1.100        ad 
    969       1.61       dsl 	pt = &pid_table[pg_id & pid_tbl_mask];
    970       1.61       dsl 	pgrp = pt->pt_pgrp;
    971  1.139.2.2      yamt 
    972  1.139.2.2      yamt 	KASSERT(pgrp != NULL);
    973  1.139.2.2      yamt 	KASSERT(pgrp->pg_id == pg_id);
    974  1.139.2.2      yamt 	KASSERT(LIST_EMPTY(&pgrp->pg_members));
    975  1.139.2.2      yamt 
    976  1.139.2.2      yamt 	pt->pt_pgrp = NULL;
    977       1.61       dsl 
    978       1.61       dsl 	if (!P_VALID(pt->pt_proc)) {
    979  1.139.2.2      yamt 		/* Orphaned pgrp, put slot onto free list. */
    980  1.139.2.2      yamt 		KASSERT((P_NEXT(pt->pt_proc) & pid_tbl_mask) == 0);
    981       1.61       dsl 		pg_id &= pid_tbl_mask;
    982       1.61       dsl 		pt = &pid_table[last_free_pt];
    983       1.61       dsl 		pt->pt_proc = P_FREE(P_NEXT(pt->pt_proc) | pg_id);
    984  1.139.2.5      yamt 		KASSERT(pt->pt_pid == 0);
    985       1.61       dsl 		last_free_pt = pg_id;
    986       1.61       dsl 		pid_alloc_cnt--;
    987       1.61       dsl 	}
    988  1.139.2.2      yamt 	return pgrp;
    989        1.1       cgd }
    990        1.1       cgd 
    991        1.1       cgd /*
    992  1.139.2.2      yamt  * pg_delete: delete and free a process group.
    993  1.139.2.2      yamt  *  => must be called with the proc_lock held, which will be released.
    994        1.1       cgd  */
    995       1.61       dsl static void
    996       1.61       dsl pg_delete(pid_t pg_id)
    997       1.61       dsl {
    998  1.139.2.2      yamt 	struct pgrp *pg;
    999       1.61       dsl 	struct tty *ttyp;
   1000       1.61       dsl 	struct session *ss;
   1001      1.100        ad 
   1002      1.136        ad 	KASSERT(mutex_owned(proc_lock));
   1003       1.61       dsl 
   1004  1.139.2.2      yamt 	pg = pid_table[pg_id & pid_tbl_mask].pt_pgrp;
   1005  1.139.2.2      yamt 	if (pg == NULL || pg->pg_id != pg_id || !LIST_EMPTY(&pg->pg_members)) {
   1006  1.139.2.2      yamt 		mutex_exit(proc_lock);
   1007       1.61       dsl 		return;
   1008  1.139.2.2      yamt 	}
   1009       1.61       dsl 
   1010  1.139.2.2      yamt 	ss = pg->pg_session;
   1011       1.71        pk 
   1012       1.61       dsl 	/* Remove reference (if any) from tty to this process group */
   1013      1.128        ad 	mutex_spin_enter(&tty_lock);
   1014       1.71        pk 	ttyp = ss->s_ttyp;
   1015  1.139.2.2      yamt 	if (ttyp != NULL && ttyp->t_pgrp == pg) {
   1016       1.61       dsl 		ttyp->t_pgrp = NULL;
   1017  1.139.2.2      yamt 		KASSERT(ttyp->t_session == ss);
   1018       1.71        pk 	}
   1019      1.128        ad 	mutex_spin_exit(&tty_lock);
   1020       1.61       dsl 
   1021       1.71        pk 	/*
   1022  1.139.2.2      yamt 	 * The leading process group in a session is freed by proc_sessrele(),
   1023  1.139.2.2      yamt 	 * if last reference.  Note: proc_sessrele() releases proc_lock.
   1024       1.71        pk 	 */
   1025  1.139.2.2      yamt 	pg = (ss->s_sid != pg->pg_id) ? pg_remove(pg_id) : NULL;
   1026  1.139.2.2      yamt 	proc_sessrele(ss);
   1027      1.100        ad 
   1028  1.139.2.2      yamt 	if (pg != NULL) {
   1029  1.139.2.2      yamt 		/* Free it, if was not done by proc_sessrele(). */
   1030  1.139.2.2      yamt 		kmem_free(pg, sizeof(struct pgrp));
   1031  1.139.2.2      yamt 	}
   1032        1.1       cgd }
   1033        1.1       cgd 
   1034        1.1       cgd /*
   1035        1.1       cgd  * Adjust pgrp jobc counters when specified process changes process group.
   1036        1.1       cgd  * We count the number of processes in each process group that "qualify"
   1037        1.1       cgd  * the group for terminal job control (those with a parent in a different
   1038        1.1       cgd  * process group of the same session).  If that count reaches zero, the
   1039        1.1       cgd  * process group becomes orphaned.  Check both the specified process'
   1040        1.1       cgd  * process group and that of its children.
   1041        1.1       cgd  * entering == 0 => p is leaving specified group.
   1042        1.1       cgd  * entering == 1 => p is entering specified group.
   1043       1.68       dsl  *
   1044      1.136        ad  * Call with proc_lock held.
   1045        1.1       cgd  */
   1046        1.4    andrew void
   1047       1.59       dsl fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
   1048        1.1       cgd {
   1049       1.39  augustss 	struct pgrp *hispgrp;
   1050       1.39  augustss 	struct session *mysession = pgrp->pg_session;
   1051       1.68       dsl 	struct proc *child;
   1052        1.1       cgd 
   1053      1.136        ad 	KASSERT(mutex_owned(proc_lock));
   1054      1.100        ad 
   1055        1.1       cgd 	/*
   1056        1.1       cgd 	 * Check p's parent to see whether p qualifies its own process
   1057        1.1       cgd 	 * group; if so, adjust count for p's process group.
   1058        1.1       cgd 	 */
   1059       1.68       dsl 	hispgrp = p->p_pptr->p_pgrp;
   1060       1.68       dsl 	if (hispgrp != pgrp && hispgrp->pg_session == mysession) {
   1061      1.100        ad 		if (entering) {
   1062        1.1       cgd 			pgrp->pg_jobc++;
   1063      1.136        ad 			p->p_lflag &= ~PL_ORPHANPG;
   1064      1.100        ad 		} else if (--pgrp->pg_jobc == 0)
   1065        1.1       cgd 			orphanpg(pgrp);
   1066       1.26   thorpej 	}
   1067        1.1       cgd 
   1068        1.1       cgd 	/*
   1069        1.1       cgd 	 * Check this process' children to see whether they qualify
   1070        1.1       cgd 	 * their process groups; if so, adjust counts for children's
   1071        1.1       cgd 	 * process groups.
   1072        1.1       cgd 	 */
   1073       1.68       dsl 	LIST_FOREACH(child, &p->p_children, p_sibling) {
   1074       1.68       dsl 		hispgrp = child->p_pgrp;
   1075       1.68       dsl 		if (hispgrp != pgrp && hispgrp->pg_session == mysession &&
   1076       1.68       dsl 		    !P_ZOMBIE(child)) {
   1077      1.100        ad 			if (entering) {
   1078      1.136        ad 				child->p_lflag &= ~PL_ORPHANPG;
   1079        1.1       cgd 				hispgrp->pg_jobc++;
   1080      1.100        ad 			} else if (--hispgrp->pg_jobc == 0)
   1081        1.1       cgd 				orphanpg(hispgrp);
   1082       1.26   thorpej 		}
   1083       1.26   thorpej 	}
   1084        1.1       cgd }
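/*
 * Worked example (illustrative note, not part of the original source):
 * a shell in process group A starts a pipeline whose members are placed
 * in a new process group B of the same session.  Each member's parent
 * (the shell) is in a different pgrp of the same session, so each entry
 * into B bumps B's pg_jobc via fixjobc(..., 1).  When the shell exits,
 * the child loop above decrements B's pg_jobc; once it reaches zero, B
 * is orphaned and orphanpg() sends SIGHUP/SIGCONT to any stopped members.
 */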
   1085        1.1       cgd 
   1086       1.72  junyoung /*
   1087        1.1       cgd  * A process group has become orphaned;
   1088        1.1       cgd  * if there are any stopped processes in the group,
    1089        1.1       cgd  * hang up all processes in that group.
   1090       1.68       dsl  *
   1091      1.136        ad  * Call with proc_lock held.
   1092        1.1       cgd  */
   1093        1.4    andrew static void
   1094       1.59       dsl orphanpg(struct pgrp *pg)
   1095        1.1       cgd {
   1096       1.39  augustss 	struct proc *p;
   1097      1.100        ad 
   1098      1.136        ad 	KASSERT(mutex_owned(proc_lock));
   1099      1.100        ad 
   1100       1.52      matt 	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
   1101        1.1       cgd 		if (p->p_stat == SSTOP) {
   1102      1.136        ad 			p->p_lflag |= PL_ORPHANPG;
   1103      1.100        ad 			psignal(p, SIGHUP);
   1104      1.100        ad 			psignal(p, SIGCONT);
   1105       1.35    bouyer 		}
   1106       1.35    bouyer 	}
   1107       1.35    bouyer }
   1108        1.1       cgd 
   1109       1.61       dsl #ifdef DDB
   1110       1.61       dsl #include <ddb/db_output.h>
   1111       1.61       dsl void pidtbl_dump(void);
   1112       1.14  christos void
   1113       1.61       dsl pidtbl_dump(void)
   1114        1.1       cgd {
   1115       1.61       dsl 	struct pid_table *pt;
   1116       1.61       dsl 	struct proc *p;
   1117       1.39  augustss 	struct pgrp *pgrp;
   1118       1.61       dsl 	int id;
   1119        1.1       cgd 
   1120       1.61       dsl 	db_printf("pid table %p size %x, next %x, last %x\n",
   1121       1.61       dsl 		pid_table, pid_tbl_mask+1,
   1122       1.61       dsl 		next_free_pt, last_free_pt);
   1123       1.61       dsl 	for (pt = pid_table, id = 0; id <= pid_tbl_mask; id++, pt++) {
   1124       1.61       dsl 		p = pt->pt_proc;
   1125       1.61       dsl 		if (!P_VALID(p) && !pt->pt_pgrp)
   1126       1.61       dsl 			continue;
   1127       1.61       dsl 		db_printf("  id %x: ", id);
   1128       1.61       dsl 		if (P_VALID(p))
   1129  1.139.2.5      yamt 			db_printf("slotpid %d proc %p id %d (0x%x) %s\n",
   1130  1.139.2.5      yamt 				pt->pt_pid, p, p->p_pid, p->p_pid, p->p_comm);
   1131       1.61       dsl 		else
   1132       1.61       dsl 			db_printf("next %x use %x\n",
   1133       1.61       dsl 				P_NEXT(p) & pid_tbl_mask,
   1134       1.61       dsl 				P_NEXT(p) & ~pid_tbl_mask);
   1135       1.61       dsl 		if ((pgrp = pt->pt_pgrp)) {
   1136       1.61       dsl 			db_printf("\tsession %p, sid %d, count %d, login %s\n",
   1137       1.61       dsl 			    pgrp->pg_session, pgrp->pg_session->s_sid,
   1138       1.61       dsl 			    pgrp->pg_session->s_count,
   1139       1.61       dsl 			    pgrp->pg_session->s_login);
   1140       1.61       dsl 			db_printf("\tpgrp %p, pg_id %d, pg_jobc %d, members %p\n",
   1141       1.61       dsl 			    pgrp, pgrp->pg_id, pgrp->pg_jobc,
   1142      1.135      yamt 			    LIST_FIRST(&pgrp->pg_members));
   1143      1.135      yamt 			LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
   1144       1.72  junyoung 				db_printf("\t\tpid %d addr %p pgrp %p %s\n",
   1145       1.61       dsl 				    p->p_pid, p, p->p_pgrp, p->p_comm);
   1146       1.10   mycroft 			}
   1147        1.1       cgd 		}
   1148        1.1       cgd 	}
   1149        1.1       cgd }
   1150       1.61       dsl #endif /* DDB */
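/*
 * Illustrative note (not part of the original source): pidtbl_dump() is
 * meant to be run by hand from the in-kernel debugger, e.g.
 *
 *	db> call pidtbl_dump
 *
 * It walks pid_table and prints each slot that holds a live proc, a
 * process group, or a link in the free list.
 */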
   1151       1.48      yamt 
   1152       1.48      yamt #ifdef KSTACK_CHECK_MAGIC
   1153       1.48      yamt 
   1154       1.48      yamt #define	KSTACK_MAGIC	0xdeadbeaf
   1155       1.48      yamt 
    1156       1.48      yamt /* XXX should this be on a per-process basis? */
   1157  1.139.2.2      yamt static int	kstackleftmin = KSTACK_SIZE;
   1158  1.139.2.2      yamt static int	kstackleftthres = KSTACK_SIZE / 8;
   1159       1.48      yamt 
   1160       1.48      yamt void
   1161       1.56      yamt kstack_setup_magic(const struct lwp *l)
   1162       1.48      yamt {
   1163       1.85     perry 	uint32_t *ip;
   1164       1.85     perry 	uint32_t const *end;
   1165       1.48      yamt 
   1166       1.56      yamt 	KASSERT(l != NULL);
   1167       1.56      yamt 	KASSERT(l != &lwp0);
   1168       1.48      yamt 
   1169       1.48      yamt 	/*
    1170       1.48      yamt 	 * Fill the whole stack with the magic number so that any later
    1171       1.48      yamt 	 * modification of it can be detected.
   1172       1.48      yamt 	 */
   1173       1.85     perry 	ip = (uint32_t *)KSTACK_LOWEST_ADDR(l);
   1174      1.114    dyoung 	end = (uint32_t *)((char *)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE);
   1175       1.48      yamt 	for (; ip < end; ip++) {
   1176       1.48      yamt 		*ip = KSTACK_MAGIC;
   1177       1.48      yamt 	}
   1178       1.48      yamt }
   1179       1.48      yamt 
   1180       1.48      yamt void
   1181       1.56      yamt kstack_check_magic(const struct lwp *l)
   1182       1.48      yamt {
   1183       1.85     perry 	uint32_t const *ip, *end;
   1184       1.48      yamt 	int stackleft;
   1185       1.48      yamt 
   1186       1.56      yamt 	KASSERT(l != NULL);
   1187       1.48      yamt 
    1188       1.48      yamt 	/* don't check lwp0 (proc0) */ /*XXX*/
   1189       1.56      yamt 	if (l == &lwp0)
   1190       1.48      yamt 		return;
   1191       1.48      yamt 
   1192       1.48      yamt #ifdef __MACHINE_STACK_GROWS_UP
   1193       1.48      yamt 	/* stack grows upwards (eg. hppa) */
   1194      1.106  christos 	ip = (uint32_t *)((void *)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE);
   1195       1.85     perry 	end = (uint32_t *)KSTACK_LOWEST_ADDR(l);
   1196       1.48      yamt 	for (ip--; ip >= end; ip--)
   1197       1.48      yamt 		if (*ip != KSTACK_MAGIC)
   1198       1.48      yamt 			break;
   1199       1.72  junyoung 
   1200      1.106  christos 	stackleft = (void *)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE - (void *)ip;
   1201       1.48      yamt #else /* __MACHINE_STACK_GROWS_UP */
   1202       1.48      yamt 	/* stack grows downwards (eg. i386) */
   1203       1.85     perry 	ip = (uint32_t *)KSTACK_LOWEST_ADDR(l);
   1204      1.114    dyoung 	end = (uint32_t *)((char *)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE);
   1205       1.48      yamt 	for (; ip < end; ip++)
   1206       1.48      yamt 		if (*ip != KSTACK_MAGIC)
   1207       1.48      yamt 			break;
   1208       1.48      yamt 
   1209       1.93  christos 	stackleft = ((const char *)ip) - (const char *)KSTACK_LOWEST_ADDR(l);
   1210       1.48      yamt #endif /* __MACHINE_STACK_GROWS_UP */
   1211       1.48      yamt 
   1212       1.48      yamt 	if (kstackleftmin > stackleft) {
   1213       1.48      yamt 		kstackleftmin = stackleft;
   1214       1.48      yamt 		if (stackleft < kstackleftthres)
    1215       1.56      yamt 			printf("warning: kernel stack left %d bytes "
   1216       1.56      yamt 			    "(pid %u:lid %u)\n", stackleft,
   1217       1.56      yamt 			    (u_int)l->l_proc->p_pid, (u_int)l->l_lid);
   1218       1.48      yamt 	}
   1219       1.48      yamt 
   1220       1.48      yamt 	if (stackleft <= 0) {
   1221       1.56      yamt 		panic("magic on the top of kernel stack changed for "
   1222       1.56      yamt 		    "pid %u, lid %u: maybe kernel stack overflow",
   1223       1.56      yamt 		    (u_int)l->l_proc->p_pid, (u_int)l->l_lid);
   1224       1.48      yamt 	}
   1225       1.48      yamt }
   1226       1.50     enami #endif /* KSTACK_CHECK_MAGIC */
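/*
 * Illustrative note (not part of the original source): the stack checks
 * above are compiled in only when the kernel is configured with the
 * KSTACK_CHECK_MAGIC debug option, e.g. a kernel config line such as
 *
 *	options 	KSTACK_CHECK_MAGIC
 *
 * kstack_setup_magic() then paints a newly created kernel stack with
 * KSTACK_MAGIC, and kstack_check_magic() later reports how much of the
 * pattern is still intact.
 */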
   1227       1.79      yamt 
   1228       1.79      yamt int
   1229       1.79      yamt proclist_foreach_call(struct proclist *list,
   1230       1.79      yamt     int (*callback)(struct proc *, void *arg), void *arg)
   1231       1.79      yamt {
   1232       1.79      yamt 	struct proc marker;
   1233       1.79      yamt 	struct proc *p;
   1234       1.79      yamt 	int ret = 0;
   1235       1.79      yamt 
   1236      1.102     pavel 	marker.p_flag = PK_MARKER;
   1237      1.136        ad 	mutex_enter(proc_lock);
   1238       1.79      yamt 	for (p = LIST_FIRST(list); ret == 0 && p != NULL;) {
   1239      1.102     pavel 		if (p->p_flag & PK_MARKER) {
   1240       1.79      yamt 			p = LIST_NEXT(p, p_list);
   1241       1.79      yamt 			continue;
   1242       1.79      yamt 		}
   1243       1.79      yamt 		LIST_INSERT_AFTER(p, &marker, p_list);
   1244       1.79      yamt 		ret = (*callback)(p, arg);
   1245      1.136        ad 		KASSERT(mutex_owned(proc_lock));
   1246       1.79      yamt 		p = LIST_NEXT(&marker, p_list);
   1247       1.79      yamt 		LIST_REMOVE(&marker, p_list);
   1248       1.79      yamt 	}
   1249      1.136        ad 	mutex_exit(proc_lock);
   1250       1.79      yamt 
   1251       1.79      yamt 	return ret;
   1252       1.79      yamt }
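/*
 * Example (illustrative sketch, not part of the original source): using
 * proclist_foreach_call() to count the processes on a list.  The callback
 * runs with proc_lock held and must leave it held; returning non-zero
 * stops the iteration and is passed back to the caller.  The function
 * names and the use of the allproc list here are assumptions made for
 * the example only.
 */
#if 0
static int
count_procs_cb(struct proc *p, void *arg)
{
	int *countp = arg;

	(*countp)++;
	return 0;		/* 0 == keep iterating */
}

static int
count_procs(void)
{
	int count = 0;

	(void)proclist_foreach_call(&allproc, count_procs_cb, &count);
	return count;
}
#endif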
   1253       1.86      yamt 
   1254       1.86      yamt int
   1255       1.86      yamt proc_vmspace_getref(struct proc *p, struct vmspace **vm)
   1256       1.86      yamt {
   1257       1.86      yamt 
   1258       1.86      yamt 	/* XXXCDC: how should locking work here? */
   1259       1.86      yamt 
   1260       1.87      yamt 	/* curproc exception is for coredump. */
   1261       1.87      yamt 
   1262      1.100        ad 	if ((p != curproc && (p->p_sflag & PS_WEXIT) != 0) ||
   1263       1.86      yamt 	    (p->p_vmspace->vm_refcnt < 1)) { /* XXX */
   1264       1.86      yamt 		return EFAULT;
   1265       1.86      yamt 	}
   1266       1.86      yamt 
   1267       1.86      yamt 	uvmspace_addref(p->p_vmspace);
   1268       1.86      yamt 	*vm = p->p_vmspace;
   1269       1.86      yamt 
   1270       1.86      yamt 	return 0;
   1271       1.86      yamt }
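/*
 * Example (illustrative sketch, not part of the original source): a
 * caller that needs another process's VM space takes a reference with
 * proc_vmspace_getref() and must drop it again with uvmspace_free()
 * once it is finished.  The wrapper function below is hypothetical.
 */
#if 0
static int
with_vmspace_example(struct proc *p)
{
	struct vmspace *vm;
	int error;

	error = proc_vmspace_getref(p, &vm);
	if (error != 0)
		return error;

	/* ... safely access vm (e.g. vm->vm_map) here ... */

	uvmspace_free(vm);		/* drops the reference taken above */
	return 0;
}
#endif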
   1272       1.94        ad 
   1273       1.94        ad /*
   1274       1.94        ad  * Acquire a write lock on the process credential.
   1275       1.94        ad  */
   1276       1.94        ad void
   1277      1.100        ad proc_crmod_enter(void)
   1278       1.94        ad {
   1279      1.100        ad 	struct lwp *l = curlwp;
   1280      1.100        ad 	struct proc *p = l->l_proc;
   1281      1.100        ad 	struct plimit *lim;
   1282      1.100        ad 	kauth_cred_t oc;
   1283      1.100        ad 	char *cn;
   1284       1.94        ad 
   1285      1.117       dsl 	/* Reset what needs to be reset in plimit. */
   1286      1.117       dsl 	if (p->p_limit->pl_corename != defcorename) {
   1287      1.117       dsl 		lim_privatise(p, false);
   1288      1.117       dsl 		lim = p->p_limit;
   1289      1.117       dsl 		mutex_enter(&lim->pl_lock);
   1290      1.117       dsl 		cn = lim->pl_corename;
   1291      1.117       dsl 		lim->pl_corename = defcorename;
   1292      1.117       dsl 		mutex_exit(&lim->pl_lock);
   1293      1.117       dsl 		if (cn != defcorename)
   1294      1.117       dsl 			free(cn, M_TEMP);
   1295      1.117       dsl 	}
   1296      1.117       dsl 
   1297      1.137        ad 	mutex_enter(p->p_lock);
   1298      1.100        ad 
   1299      1.100        ad 	/* Ensure the LWP cached credentials are up to date. */
   1300      1.100        ad 	if ((oc = l->l_cred) != p->p_cred) {
   1301      1.100        ad 		kauth_cred_hold(p->p_cred);
   1302      1.100        ad 		l->l_cred = p->p_cred;
   1303      1.100        ad 		kauth_cred_free(oc);
   1304      1.100        ad 	}
   1305      1.100        ad 
   1306       1.94        ad }
   1307       1.94        ad 
   1308       1.94        ad /*
    1309      1.100        ad  * Set in a new process credential and drop the write lock.  The credential
    1310      1.100        ad  * must already hold a reference.  Optionally, free a credential that is no
    1311      1.100        ad  * longer required.  The scheduler also inspects p_cred, so the update is
    1312      1.100        ad  * made while holding p->p_lock.
   1313       1.94        ad  */
   1314       1.94        ad void
   1315      1.104   thorpej proc_crmod_leave(kauth_cred_t scred, kauth_cred_t fcred, bool sugid)
   1316       1.94        ad {
   1317      1.133        ad 	struct lwp *l = curlwp, *l2;
   1318      1.100        ad 	struct proc *p = l->l_proc;
   1319      1.100        ad 	kauth_cred_t oc;
   1320      1.100        ad 
   1321      1.137        ad 	KASSERT(mutex_owned(p->p_lock));
   1322      1.137        ad 
   1323      1.100        ad 	/* Is there a new credential to set in? */
   1324      1.100        ad 	if (scred != NULL) {
   1325      1.100        ad 		p->p_cred = scred;
   1326      1.133        ad 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
   1327      1.133        ad 			if (l2 != l)
   1328      1.133        ad 				l2->l_prflag |= LPR_CRMOD;
   1329      1.133        ad 		}
   1330      1.100        ad 
   1331      1.100        ad 		/* Ensure the LWP cached credentials are up to date. */
   1332      1.100        ad 		if ((oc = l->l_cred) != scred) {
   1333      1.100        ad 			kauth_cred_hold(scred);
   1334      1.100        ad 			l->l_cred = scred;
   1335      1.100        ad 		}
   1336      1.100        ad 	} else
   1337      1.100        ad 		oc = NULL;	/* XXXgcc */
   1338      1.100        ad 
   1339      1.100        ad 	if (sugid) {
   1340      1.100        ad 		/*
    1341      1.100        ad 		 * Mark the process as having changed credentials; this
    1342      1.100        ad 		 * stops tracing, etc.
   1343      1.100        ad 		 */
   1344      1.102     pavel 		p->p_flag |= PK_SUGID;
   1345      1.100        ad 	}
   1346       1.94        ad 
   1347      1.137        ad 	mutex_exit(p->p_lock);
   1348      1.100        ad 
   1349      1.100        ad 	/* If there is a credential to be released, free it now. */
   1350      1.100        ad 	if (fcred != NULL) {
   1351      1.100        ad 		KASSERT(scred != NULL);
   1352       1.94        ad 		kauth_cred_free(fcred);
   1353      1.100        ad 		if (oc != scred)
   1354      1.100        ad 			kauth_cred_free(oc);
   1355      1.100        ad 	}
   1356      1.100        ad }
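/*
 * Example (illustrative sketch, not part of the original source): the
 * usual pattern for replacing the current process's credentials, loosely
 * modelled on the set*uid() family.  proc_crmod_enter() takes p->p_lock;
 * a new credential is built from a copy of the old one and installed by
 * proc_crmod_leave(), which also releases the lock and frees the old
 * credential.  The function name and choice of kauth setters are
 * assumptions made for the example only.
 */
#if 0
static void
change_uid_example(uid_t uid)
{
	struct proc *p = curproc;
	kauth_cred_t ncred, ocred;

	proc_crmod_enter();
	ocred = p->p_cred;

	ncred = kauth_cred_dup(ocred);
	kauth_cred_setuid(ncred, uid);
	kauth_cred_seteuid(ncred, uid);
	kauth_cred_setsvuid(ncred, uid);

	/* Install ncred, free ocred and flag the process PK_SUGID. */
	proc_crmod_leave(ncred, ocred, true);
}
#endif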
   1357      1.100        ad 
   1358      1.100        ad /*
   1359       1.95   thorpej  * proc_specific_key_create --
   1360       1.95   thorpej  *	Create a key for subsystem proc-specific data.
   1361       1.95   thorpej  */
   1362       1.95   thorpej int
   1363       1.95   thorpej proc_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
   1364       1.95   thorpej {
   1365       1.95   thorpej 
   1366       1.98   thorpej 	return (specificdata_key_create(proc_specificdata_domain, keyp, dtor));
   1367       1.95   thorpej }
   1368       1.95   thorpej 
   1369       1.95   thorpej /*
   1370       1.95   thorpej  * proc_specific_key_delete --
   1371       1.95   thorpej  *	Delete a key for subsystem proc-specific data.
   1372       1.95   thorpej  */
   1373       1.95   thorpej void
   1374       1.95   thorpej proc_specific_key_delete(specificdata_key_t key)
   1375       1.95   thorpej {
   1376       1.95   thorpej 
   1377       1.95   thorpej 	specificdata_key_delete(proc_specificdata_domain, key);
   1378       1.95   thorpej }
   1379       1.95   thorpej 
   1380       1.98   thorpej /*
   1381       1.98   thorpej  * proc_initspecific --
   1382       1.98   thorpej  *	Initialize a proc's specificdata container.
   1383       1.98   thorpej  */
   1384       1.96  christos void
   1385       1.96  christos proc_initspecific(struct proc *p)
   1386       1.96  christos {
   1387       1.96  christos 	int error;
   1388       1.98   thorpej 
   1389       1.96  christos 	error = specificdata_init(proc_specificdata_domain, &p->p_specdataref);
   1390       1.96  christos 	KASSERT(error == 0);
   1391       1.96  christos }
   1392       1.96  christos 
   1393       1.95   thorpej /*
   1394       1.98   thorpej  * proc_finispecific --
   1395       1.98   thorpej  *	Finalize a proc's specificdata container.
   1396       1.98   thorpej  */
   1397       1.98   thorpej void
   1398       1.98   thorpej proc_finispecific(struct proc *p)
   1399       1.98   thorpej {
   1400       1.98   thorpej 
   1401       1.98   thorpej 	specificdata_fini(proc_specificdata_domain, &p->p_specdataref);
   1402       1.98   thorpej }
   1403       1.98   thorpej 
   1404       1.98   thorpej /*
   1405       1.95   thorpej  * proc_getspecific --
   1406       1.95   thorpej  *	Return proc-specific data corresponding to the specified key.
   1407       1.95   thorpej  */
   1408       1.95   thorpej void *
   1409       1.95   thorpej proc_getspecific(struct proc *p, specificdata_key_t key)
   1410       1.95   thorpej {
   1411       1.95   thorpej 
   1412       1.95   thorpej 	return (specificdata_getspecific(proc_specificdata_domain,
   1413       1.95   thorpej 					 &p->p_specdataref, key));
   1414       1.95   thorpej }
   1415       1.95   thorpej 
   1416       1.95   thorpej /*
   1417       1.95   thorpej  * proc_setspecific --
   1418       1.95   thorpej  *	Set proc-specific data corresponding to the specified key.
   1419       1.95   thorpej  */
   1420       1.95   thorpej void
   1421       1.95   thorpej proc_setspecific(struct proc *p, specificdata_key_t key, void *data)
   1422       1.95   thorpej {
   1423       1.95   thorpej 
   1424       1.95   thorpej 	specificdata_setspecific(proc_specificdata_domain,
   1425       1.95   thorpej 				 &p->p_specdataref, key, data);
   1426       1.95   thorpej }
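/*
 * Example (illustrative sketch, not part of the original source): a
 * subsystem attaches private per-process data by creating a key once at
 * initialization time and then using proc_setspecific() /
 * proc_getspecific() with it.  The key, destructor and payload below
 * are hypothetical.
 */
#if 0
static specificdata_key_t example_key;

static void
example_dtor(void *data)
{

	kmem_free(data, sizeof(int));
}

static void
example_subsys_init(void)
{

	(void)proc_specific_key_create(&example_key, example_dtor);
}

static void
example_tag_proc(struct proc *p)
{
	int *tag;

	tag = kmem_alloc(sizeof(*tag), KM_SLEEP);
	*tag = 42;
	proc_setspecific(p, example_key, tag);
}
#endif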
   1427  1.139.2.4      yamt 
   1428  1.139.2.4      yamt int
   1429  1.139.2.4      yamt proc_uidmatch(kauth_cred_t cred, kauth_cred_t target)
   1430  1.139.2.4      yamt {
   1431  1.139.2.4      yamt 	int r = 0;
   1432  1.139.2.4      yamt 
   1433  1.139.2.4      yamt 	if (kauth_cred_getuid(cred) != kauth_cred_getuid(target) ||
   1434  1.139.2.4      yamt 	    kauth_cred_getuid(cred) != kauth_cred_getsvuid(target)) {
   1435  1.139.2.4      yamt 		/*
    1436  1.139.2.4      yamt 		 * a set-uid process of ours, or a process that is not ours
   1437  1.139.2.4      yamt 		 */
   1438  1.139.2.4      yamt 		r = EPERM;
   1439  1.139.2.4      yamt 	} else if (kauth_cred_getgid(target) != kauth_cred_getsvgid(target)) {
   1440  1.139.2.4      yamt 		/*
    1441  1.139.2.4      yamt 		 * a set-gid process that has temporarily set its gid back to ours
   1442  1.139.2.4      yamt 		 */
   1443  1.139.2.4      yamt 		r = EPERM;
   1444  1.139.2.4      yamt 	} else {
   1445  1.139.2.4      yamt 		/*
    1446  1.139.2.4      yamt 		 * our rgid must be in target's group list (i.e.,
    1447  1.139.2.4      yamt 		 * sub-processes started by an sgid process)
   1448  1.139.2.4      yamt 		 */
   1449  1.139.2.4      yamt 		int ismember = 0;
   1450  1.139.2.4      yamt 
   1451  1.139.2.4      yamt 		if (kauth_cred_ismember_gid(cred,
   1452  1.139.2.4      yamt 		    kauth_cred_getgid(target), &ismember) != 0 ||
   1453  1.139.2.4      yamt 		    !ismember)
   1454  1.139.2.4      yamt 			r = EPERM;
   1455  1.139.2.4      yamt 	}
   1456  1.139.2.4      yamt 
   1457  1.139.2.4      yamt 	return (r);
   1458  1.139.2.4      yamt }
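/*
 * Example (illustrative sketch, not part of the original source):
 * proc_uidmatch() answers whether the caller's credentials should be
 * allowed to operate on another process's credentials, returning 0 on
 * success and EPERM otherwise.  The wrapper below is hypothetical.
 */
#if 0
static int
may_touch_proc(struct lwp *l, struct proc *target)
{

	return proc_uidmatch(l->l_cred, target->p_cred);
}
#endif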
   1459