/*	$NetBSD: kern_proc.c,v 1.75 2004/03/14 01:08:47 cl Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_proc.c	8.7 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_proc.c,v 1.75 2004/03/14 01:08:47 cl Exp $");

#include "opt_kstack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/acct.h>
#include <sys/wait.h>
#include <sys/file.h>
#include <ufs/ufs/quota.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/mbuf.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/signalvar.h>
#include <sys/ras.h>
#include <sys/sa.h>
#include <sys/savar.h>

/*
 * Structure associated with user caching.
 */
struct uidinfo {
	LIST_ENTRY(uidinfo) ui_hash;
	uid_t	ui_uid;
	long	ui_proccnt;
};
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
u_long uihash;		/* size of hash table - 1 */
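
/*
 * Worked example (illustrative only; assumes a 16-bucket table, so
 * that uihash == 15): UIHASH(1000) selects bucket 1000 & 15 == 8, and
 * chgproccnt() below walks that bucket's ui_hash chain looking for a
 * uidinfo with a matching ui_uid.
 */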

/*
 * Other process lists
 */

struct proclist allproc;
struct proclist zombproc;	/* resources have been freed */


/*
 * Process list locking:
 *
 * We have two types of locks on the proclists: read locks and write
 * locks.  Read locks can be used in interrupt context, so while we
 * hold the write lock, we must also block clock interrupts to
 * lock out any scheduling changes that may happen in interrupt
 * context.
 *
 * The proclist lock locks the following structures:
 *
 *	allproc
 *	zombproc
 *	pid_table
 */
struct lock proclist_lock;
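
/*
 * Usage sketch (illustrative only; assumes the caller holds no other
 * spin locks):
 *
 *	proclist_lock_read();
 *	LIST_FOREACH(p, &allproc, p_list)
 *		...examine p...
 *	proclist_unlock_read();
 *
 * Writers instead bracket their updates with s = proclist_lock_write()
 * and proclist_unlock_write(s), which also raises the IPL via
 * splclock() for the duration (see proclist_lock_write() below).
 */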

/*
 * pid to proc lookup is done by indexing the pid_table array.
 * Since pid numbers are only allocated when an empty slot
 * has been found, there is no need to search any lists ever.
 * (an orphaned pgrp will lock the slot, a session will lock
 * the pgrp with the same number.)
 * If the table is too small it is reallocated with twice the
 * previous size and the entries 'unzipped' into the two halves.
 * A linked list of free entries is passed through the pt_proc
 * field of 'free' items - set odd to be an invalid ptr.
 */

struct pid_table {
	struct proc	*pt_proc;
	struct pgrp	*pt_pgrp;
};
#if 1	/* strongly typed cast - should be a noop */
static __inline uint p2u(struct proc *p) { return (uint)(uintptr_t)p; }
#else
#define	p2u(p)	((uint)p)
#endif
#define	P_VALID(p)	(!(p2u(p) & 1))
#define	P_NEXT(p)	(p2u(p) >> 1)
#define	P_FREE(pid)	((struct proc *)(uintptr_t)((pid) << 1 | 1))
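
/*
 * Worked example of the free-slot encoding above (illustrative only;
 * assumes the initial 32-entry table, so pid_tbl_mask == 0x1f): a free
 * slot whose pt_proc is P_FREE(100) holds the odd value
 * (100 << 1) | 1 == 201, so P_VALID() is false.  P_NEXT() recovers 100;
 * its low 5 bits (4) index the next free slot, and the remaining bits
 * (96) are the saved 'use count' that proc_alloc() combines with the
 * table size and the slot index to form the pid it hands out.
 */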

#define	INITIAL_PID_TABLE_SIZE	(1 << 5)
static struct pid_table *pid_table;
static uint pid_tbl_mask = INITIAL_PID_TABLE_SIZE - 1;
static uint pid_alloc_lim;	/* max we allocate before growing table */
static uint pid_alloc_cnt;	/* number of allocated pids */

/* links through free slots - never empty! */
static uint next_free_pt, last_free_pt;
static pid_t pid_max = PID_MAX;		/* largest value we allocate */

struct pool proc_pool;
struct pool lwp_pool;
struct pool lwp_uc_pool;
struct pool pcred_pool;
struct pool plimit_pool;
struct pool pstats_pool;
struct pool pgrp_pool;
struct pool rusage_pool;
struct pool ras_pool;
struct pool sadata_pool;
struct pool saupcall_pool;
struct pool sastack_pool;
struct pool savp_pool;
struct pool ptimer_pool;

MALLOC_DEFINE(M_EMULDATA, "emuldata", "Per-process emulation data");
MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_SESSION, "session", "session header");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");

/*
 * The process list descriptors, used during pid allocation and
 * by sysctl.  No locking on this data structure is needed since
 * it is completely static.
 */
const struct proclist_desc proclists[] = {
	{ &allproc },
	{ &zombproc },
	{ NULL },
};

static void orphanpg(struct pgrp *);
static void pg_delete(pid_t);

/*
 * Initialize global process hashing structures.
 */
void
procinit(void)
{
	const struct proclist_desc *pd;
	int i;
#define	LINK_EMPTY ((PID_MAX + INITIAL_PID_TABLE_SIZE) & ~(INITIAL_PID_TABLE_SIZE - 1))

	for (pd = proclists; pd->pd_list != NULL; pd++)
		LIST_INIT(pd->pd_list);

	spinlockinit(&proclist_lock, "proclk", 0);

	pid_table = malloc(INITIAL_PID_TABLE_SIZE * sizeof *pid_table,
	    M_PROC, M_WAITOK);
	/* Set free list running through table...
	   Preset 'use count' above PID_MAX so we allocate pid 1 next. */
	for (i = 0; i <= pid_tbl_mask; i++) {
		pid_table[i].pt_proc = P_FREE(LINK_EMPTY + i + 1);
		pid_table[i].pt_pgrp = 0;
	}
	/* slot 0 is just grabbed */
	next_free_pt = 1;
	/* Need to fix last entry. */
	last_free_pt = pid_tbl_mask;
	pid_table[last_free_pt].pt_proc = P_FREE(LINK_EMPTY);
	/* point at which we grow table - to avoid reusing pids too often */
	pid_alloc_lim = pid_tbl_mask - 1;
#undef LINK_EMPTY

	LIST_INIT(&alllwp);

	uihashtbl =
	    hashinit(maxproc / 16, HASH_LIST, M_PROC, M_WAITOK, &uihash);

	pool_init(&proc_pool, sizeof(struct proc), 0, 0, 0, "procpl",
	    &pool_allocator_nointr);
	pool_init(&lwp_pool, sizeof(struct lwp), 0, 0, 0, "lwppl",
	    &pool_allocator_nointr);
	pool_init(&lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
	    &pool_allocator_nointr);
	pool_init(&pgrp_pool, sizeof(struct pgrp), 0, 0, 0, "pgrppl",
	    &pool_allocator_nointr);
	pool_init(&pcred_pool, sizeof(struct pcred), 0, 0, 0, "pcredpl",
	    &pool_allocator_nointr);
	pool_init(&plimit_pool, sizeof(struct plimit), 0, 0, 0, "plimitpl",
	    &pool_allocator_nointr);
	pool_init(&pstats_pool, sizeof(struct pstats), 0, 0, 0, "pstatspl",
	    &pool_allocator_nointr);
	pool_init(&rusage_pool, sizeof(struct rusage), 0, 0, 0, "rusgepl",
	    &pool_allocator_nointr);
	pool_init(&ras_pool, sizeof(struct ras), 0, 0, 0, "raspl",
	    &pool_allocator_nointr);
	pool_init(&sadata_pool, sizeof(struct sadata), 0, 0, 0, "sadatapl",
	    &pool_allocator_nointr);
	pool_init(&saupcall_pool, sizeof(struct sadata_upcall), 0, 0, 0,
	    "saupcpl", &pool_allocator_nointr);
	pool_init(&sastack_pool, sizeof(struct sastack), 0, 0, 0, "sastackpl",
	    &pool_allocator_nointr);
	pool_init(&savp_pool, sizeof(struct sadata_vp), 0, 0, 0, "savppl",
	    &pool_allocator_nointr);
	pool_init(&ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
	    &pool_allocator_nointr);
}

/*
 * Acquire a read lock on the proclist.
 */
void
proclist_lock_read(void)
{
	int error;

	error = spinlockmgr(&proclist_lock, LK_SHARED, NULL);
#ifdef DIAGNOSTIC
	if (__predict_false(error != 0))
		panic("proclist_lock_read: failed to acquire lock");
#endif
}

/*
 * Release a read lock on the proclist.
 */
void
proclist_unlock_read(void)
{

	(void) spinlockmgr(&proclist_lock, LK_RELEASE, NULL);
}

/*
 * Acquire a write lock on the proclist.
 */
int
proclist_lock_write(void)
{
	int s, error;

	s = splclock();
	error = spinlockmgr(&proclist_lock, LK_EXCLUSIVE, NULL);
#ifdef DIAGNOSTIC
	if (__predict_false(error != 0))
		panic("proclist_lock: failed to acquire lock");
#endif
	return (s);
}

/*
 * Release a write lock on the proclist.
 */
void
proclist_unlock_write(int s)
{

	(void) spinlockmgr(&proclist_lock, LK_RELEASE, NULL);
	splx(s);
}

/*
 * Change the count associated with number of processes
 * a given user is using.
 */
int
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;
	struct uihashhead *uipp;

	uipp = UIHASH(uid);

	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid)
			break;

	if (uip) {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt > 0)
			return (uip->ui_proccnt);
		if (uip->ui_proccnt < 0)
			panic("chgproccnt: procs < 0");
		LIST_REMOVE(uip, ui_hash);
		FREE(uip, M_PROC);
		return (0);
	}
	if (diff <= 0) {
		if (diff == 0)
			return(0);
		panic("chgproccnt: lost user");
	}
	MALLOC(uip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
	LIST_INSERT_HEAD(uipp, uip, ui_hash);
	uip->ui_uid = uid;
	uip->ui_proccnt = diff;
	return (diff);
}

/*
 * Check that the specified process group is in the session of the
 * specified process.
 * Treats negative ids as process ids.
 * Used to validate TIOCSPGRP requests.
 */
int
pgid_in_session(struct proc *p, pid_t pg_id)
{
	struct pgrp *pgrp;

	if (pg_id < 0) {
		struct proc *p1 = pfind(-pg_id);
		if (p1 == NULL)
			return EINVAL;
		pgrp = p1->p_pgrp;
	} else {
		pgrp = pgfind(pg_id);
		if (pgrp == NULL)
			return EINVAL;
	}
	if (pgrp->pg_session != p->p_pgrp->pg_session)
		return EPERM;
	return 0;
}

/*
 * Is p an inferior of q?
 */
int
inferior(struct proc *p, struct proc *q)
{

	for (; p != q; p = p->p_pptr)
		if (p->p_pid == 0)
			return (0);
	return (1);
}

/*
 * Locate a process by number
 */
struct proc *
p_find(pid_t pid, uint flags)
{
	struct proc *p;
	char stat;

	if (!(flags & PFIND_LOCKED))
		proclist_lock_read();
	p = pid_table[pid & pid_tbl_mask].pt_proc;
	/* Only allow live processes to be found by pid. */
	if (P_VALID(p) && p->p_pid == pid &&
	    ((stat = p->p_stat) == SACTIVE || stat == SSTOP
	     || (stat == SZOMB && (flags & PFIND_ZOMBIE)))) {
		if (flags & PFIND_UNLOCK_OK)
			proclist_unlock_read();
		return p;
	}
	if (flags & PFIND_UNLOCK_FAIL)
		proclist_unlock_read();
	return NULL;
}


/*
 * Locate a process group by number
 */
struct pgrp *
pg_find(pid_t pgid, uint flags)
{
	struct pgrp *pg;

	if (!(flags & PFIND_LOCKED))
		proclist_lock_read();
	pg = pid_table[pgid & pid_tbl_mask].pt_pgrp;
	/*
	 * Can't look up a pgrp that only exists because the session
	 * hasn't died yet (traditional)
	 */
	if (pg == NULL || pg->pg_id != pgid || LIST_EMPTY(&pg->pg_members)) {
		if (flags & PFIND_UNLOCK_FAIL)
			proclist_unlock_read();
		return NULL;
	}

	if (flags & PFIND_UNLOCK_OK)
		proclist_unlock_read();
	return pg;
}

/*
 * Set entry for process 0
 */
void
proc0_insert(struct proc *p, struct lwp *l, struct pgrp *pgrp,
	struct session *sess)
{
	int s;

	simple_lock_init(&p->p_lock);
	LIST_INIT(&p->p_lwps);
	LIST_INSERT_HEAD(&p->p_lwps, l, l_sibling);
	p->p_nlwps = 1;
	simple_lock_init(&p->p_sigctx.ps_silock);
	CIRCLEQ_INIT(&p->p_sigctx.ps_siginfo);

	s = proclist_lock_write();

	pid_table[0].pt_proc = p;
	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(&alllwp, l, l_list);

	p->p_pgrp = pgrp;
	pid_table[0].pt_pgrp = pgrp;
	LIST_INIT(&pgrp->pg_members);
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);

	pgrp->pg_session = sess;
	sess->s_count = 1;
	sess->s_sid = 0;
	sess->s_leader = p;

	proclist_unlock_write(s);
}

static void
expand_pid_table(void)
{
	uint pt_size = pid_tbl_mask + 1;
	struct pid_table *n_pt, *new_pt;
	struct proc *proc;
	struct pgrp *pgrp;
	int i;
	int s;
	pid_t pid;

	new_pt = malloc(pt_size * 2 * sizeof *new_pt, M_PROC, M_WAITOK);

	s = proclist_lock_write();
	if (pt_size != pid_tbl_mask + 1) {
		/* Another process beat us to it... */
		proclist_unlock_write(s);
		FREE(new_pt, M_PROC);
		return;
	}

	/*
	 * Copy entries from old table into new one.
	 * If 'pid' is 'odd' we need to place it in the upper half,
	 * even pids go to the lower half.
	 * Free items stay in the low half so we don't have to
	 * fix up the reference to them.
	 * We stuff free items on the front of the freelist
	 * because we can't write to unmodified entries.
	 * Processing the table backwards maintains a semblance
	 * of issuing pid numbers that increase with time.
	 */
	i = pt_size - 1;
	n_pt = new_pt + i;
	for (; ; i--, n_pt--) {
		proc = pid_table[i].pt_proc;
		pgrp = pid_table[i].pt_pgrp;
		if (!P_VALID(proc)) {
			/* Up 'use count' so that link is valid */
			pid = (P_NEXT(proc) + pt_size) & ~pt_size;
			proc = P_FREE(pid);
			if (pgrp)
				pid = pgrp->pg_id;
		} else
			pid = proc->p_pid;

		/* Save entry in appropriate half of table */
		n_pt[pid & pt_size].pt_proc = proc;
		n_pt[pid & pt_size].pt_pgrp = pgrp;

		/* Put other piece on start of free list */
		pid = (pid ^ pt_size) & ~pid_tbl_mask;
		n_pt[pid & pt_size].pt_proc =
		    P_FREE((pid & ~pt_size) | next_free_pt);
		n_pt[pid & pt_size].pt_pgrp = 0;
		next_free_pt = i | (pid & pt_size);
		if (i == 0)
			break;
	}

	/* Switch tables */
	n_pt = pid_table;
	pid_table = new_pt;
	pid_tbl_mask = pt_size * 2 - 1;

	/*
	 * pid_max starts as PID_MAX (= 30000), once we have 16384
	 * allocated pids we need it to be larger!
	 */
	if (pid_tbl_mask > PID_MAX) {
		pid_max = pid_tbl_mask * 2 + 1;
		pid_alloc_lim |= pid_alloc_lim << 1;
	} else
		pid_alloc_lim <<= 1;	/* doubles number of free slots... */

	proclist_unlock_write(s);
	FREE(n_pt, M_PROC);
}

struct proc *
proc_alloc(void)
{
	struct proc *p;
	int s;
	int nxt;
	pid_t pid;
	struct pid_table *pt;

	p = pool_get(&proc_pool, PR_WAITOK);
	p->p_stat = SIDL;		/* protect against others */

	/* allocate next free pid */

	for (;;expand_pid_table()) {
		if (__predict_false(pid_alloc_cnt >= pid_alloc_lim))
			/* ensure pids cycle through 2000+ values */
			continue;
		s = proclist_lock_write();
		pt = &pid_table[next_free_pt];
#ifdef DIAGNOSTIC
		if (__predict_false(P_VALID(pt->pt_proc) || pt->pt_pgrp))
			panic("proc_alloc: slot busy");
#endif
		nxt = P_NEXT(pt->pt_proc);
		if (nxt & pid_tbl_mask)
			break;
		/* Table full - expand (NB last entry not used....) */
		proclist_unlock_write(s);
	}

	/* pid is 'saved use count' + 'size' + entry */
	pid = (nxt & ~pid_tbl_mask) + pid_tbl_mask + 1 + next_free_pt;
	if ((uint)pid > (uint)pid_max)
		pid &= pid_tbl_mask;
	p->p_pid = pid;
	next_free_pt = nxt & pid_tbl_mask;

	/* Grab table slot */
	pt->pt_proc = p;
	pid_alloc_cnt++;

	proclist_unlock_write(s);

	return p;
}

/*
 * Free last resources of a process - called from proc_free (in kern_exit.c)
 */
void
proc_free_mem(struct proc *p)
{
	int s;
	pid_t pid = p->p_pid;
	struct pid_table *pt;

	s = proclist_lock_write();

	pt = &pid_table[pid & pid_tbl_mask];
#ifdef DIAGNOSTIC
	if (__predict_false(pt->pt_proc != p))
		panic("proc_free: pid_table mismatch, pid %x, proc %p",
		    pid, p);
#endif
	/* save pid use count in slot */
	pt->pt_proc = P_FREE(pid & ~pid_tbl_mask);

	if (pt->pt_pgrp == NULL) {
		/* link last freed entry onto ours */
		pid &= pid_tbl_mask;
		pt = &pid_table[last_free_pt];
		pt->pt_proc = P_FREE(P_NEXT(pt->pt_proc) | pid);
		last_free_pt = pid;
		pid_alloc_cnt--;
	}

	nprocs--;
	proclist_unlock_write(s);

	pool_put(&proc_pool, p);
}

/*
 * Move p to a new or existing process group (and session)
 *
 * If we are creating a new pgrp, the pgid should equal
 * the calling process' pid.
 * It is only valid to enter a process group that is in the session
 * of the process.
 * Also, mksess should only be set if we are creating a process group.
 *
 * Only called from sys_setsid, sys_setpgid/sys_setpgrp and the
 * SYSV setpgrp support for hpux == enterpgrp(curproc, curproc->p_pid)
 */
int
enterpgrp(struct proc *p, pid_t pgid, int mksess)
{
	struct pgrp *new_pgrp, *pgrp;
	struct session *sess;
	struct proc *curp = curproc;
	pid_t pid = p->p_pid;
	int rval;
	int s;
	pid_t pg_id = NO_PGID;

	/* Allocate data areas we might need before doing any validity checks */
	proclist_lock_read();		/* Because pid_table might change */
	if (pid_table[pgid & pid_tbl_mask].pt_pgrp == 0) {
		proclist_unlock_read();
		new_pgrp = pool_get(&pgrp_pool, PR_WAITOK);
	} else {
		proclist_unlock_read();
		new_pgrp = NULL;
	}
	if (mksess)
		MALLOC(sess, struct session *, sizeof(struct session),
		    M_SESSION, M_WAITOK);
	else
		sess = NULL;

	s = proclist_lock_write();
	rval = EPERM;	/* most common error (to save typing) */

	/* Check pgrp exists or can be created */
	pgrp = pid_table[pgid & pid_tbl_mask].pt_pgrp;
	if (pgrp != NULL && pgrp->pg_id != pgid)
		goto done;

	/* Can only set another process under restricted circumstances. */
	if (p != curp) {
		/* must exist and be one of our children... */
		if (p != pid_table[pid & pid_tbl_mask].pt_proc
		    || !inferior(p, curp)) {
			rval = ESRCH;
			goto done;
		}
		/* ... in the same session... */
		if (sess != NULL || p->p_session != curp->p_session)
			goto done;
		/* ... existing pgid must be in same session ... */
		if (pgrp != NULL && pgrp->pg_session != p->p_session)
			goto done;
		/* ... and not done an exec. */
		if (p->p_flag & P_EXEC) {
			rval = EACCES;
			goto done;
		}
	}

	/* Changing the process group/session of a session
	   leader is definitely off limits. */
	if (SESS_LEADER(p)) {
		if (sess == NULL && p->p_pgrp == pgrp)
			/* unless it's a definite noop */
			rval = 0;
		goto done;
	}

	/* Can only create a process group with id of process */
	if (pgrp == NULL && pgid != pid)
		goto done;

	/* Can only create a session if creating pgrp */
	if (sess != NULL && pgrp != NULL)
		goto done;

	/* Check we allocated memory for a pgrp... */
	if (pgrp == NULL && new_pgrp == NULL)
		goto done;

	/* Don't attach to 'zombie' pgrp */
	if (pgrp != NULL && LIST_EMPTY(&pgrp->pg_members))
		goto done;

	/* Expect to succeed now */
	rval = 0;

	if (pgrp == p->p_pgrp)
		/* nothing to do */
		goto done;

	/* Ok all setup, link up required structures */
	if (pgrp == NULL) {
		pgrp = new_pgrp;
		new_pgrp = 0;
		if (sess != NULL) {
			sess->s_sid = p->p_pid;
			sess->s_leader = p;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = NULL;
			sess->s_flags = p->p_session->s_flags & ~S_LOGIN_SET;
			memcpy(sess->s_login, p->p_session->s_login,
			    sizeof(sess->s_login));
			p->p_flag &= ~P_CONTROLT;
		} else {
			sess = p->p_pgrp->pg_session;
			SESSHOLD(sess);
		}
		pgrp->pg_session = sess;
		sess = 0;

		pgrp->pg_id = pgid;
		LIST_INIT(&pgrp->pg_members);
#ifdef DIAGNOSTIC
		if (__predict_false(pid_table[pgid & pid_tbl_mask].pt_pgrp))
			panic("enterpgrp: pgrp table slot in use");
		if (__predict_false(mksess && p != curp))
			panic("enterpgrp: mksession and p != curproc");
#endif
		pid_table[pgid & pid_tbl_mask].pt_pgrp = pgrp;
		pgrp->pg_jobc = 0;
	}

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, p->p_pgrp, 0);

	/* Move process to requested group */
	LIST_REMOVE(p, p_pglist);
	if (LIST_EMPTY(&p->p_pgrp->pg_members))
		/* defer delete until we've dumped the lock */
		pg_id = p->p_pgrp->pg_id;
	p->p_pgrp = pgrp;
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);

    done:
	proclist_unlock_write(s);
	if (sess != NULL)
		free(sess, M_SESSION);
	if (new_pgrp != NULL)
		pool_put(&pgrp_pool, new_pgrp);
	if (pg_id != NO_PGID)
		pg_delete(pg_id);
#ifdef DEBUG_PGRP
	if (__predict_false(rval))
		printf("enterpgrp(%d,%d,%d), curproc %d, rval %d\n",
		    pid, pgid, mksess, curp->p_pid, rval);
#endif
	return rval;
}

/*
 * remove process from process group
 */
int
leavepgrp(struct proc *p)
{
	int s;
	struct pgrp *pgrp;
	pid_t pg_id;

	s = proclist_lock_write();
	pgrp = p->p_pgrp;
	LIST_REMOVE(p, p_pglist);
	p->p_pgrp = 0;
	pg_id = LIST_EMPTY(&pgrp->pg_members) ? pgrp->pg_id : NO_PGID;
	proclist_unlock_write(s);

	if (pg_id != NO_PGID)
		pg_delete(pg_id);
	return 0;
}

static void
pg_free(pid_t pg_id)
{
	struct pgrp *pgrp;
	struct pid_table *pt;
	int s;

	s = proclist_lock_write();
	pt = &pid_table[pg_id & pid_tbl_mask];
	pgrp = pt->pt_pgrp;
#ifdef DIAGNOSTIC
	if (__predict_false(!pgrp || pgrp->pg_id != pg_id
	    || !LIST_EMPTY(&pgrp->pg_members)))
		panic("pg_free: process group absent or has members");
#endif
	pt->pt_pgrp = 0;

	if (!P_VALID(pt->pt_proc)) {
		/* orphaned pgrp, put slot onto free list */
#ifdef DIAGNOSTIC
		if (__predict_false(P_NEXT(pt->pt_proc) & pid_tbl_mask))
			panic("pg_free: process slot on free list");
#endif

		pg_id &= pid_tbl_mask;
		pt = &pid_table[last_free_pt];
		pt->pt_proc = P_FREE(P_NEXT(pt->pt_proc) | pg_id);
		last_free_pt = pg_id;
		pid_alloc_cnt--;
	}
	proclist_unlock_write(s);

	pool_put(&pgrp_pool, pgrp);
}

/*
 * delete a process group
 */
static void
pg_delete(pid_t pg_id)
{
	struct pgrp *pgrp;
	struct tty *ttyp;
	struct session *ss;
	int s, is_pgrp_leader;

	s = proclist_lock_write();
	pgrp = pid_table[pg_id & pid_tbl_mask].pt_pgrp;
	if (pgrp == NULL || pgrp->pg_id != pg_id ||
	    !LIST_EMPTY(&pgrp->pg_members)) {
		proclist_unlock_write(s);
		return;
	}

	ss = pgrp->pg_session;

	/* Remove reference (if any) from tty to this process group */
	ttyp = ss->s_ttyp;
	if (ttyp != NULL && ttyp->t_pgrp == pgrp) {
		ttyp->t_pgrp = NULL;
#ifdef DIAGNOSTIC
		if (ttyp->t_session != ss)
			panic("pg_delete: wrong session on terminal");
#endif
	}

	/*
	 * The leading process group in a session is freed
	 * by sessdelete() if last reference.
	 */
	is_pgrp_leader = (ss->s_sid == pgrp->pg_id);
	proclist_unlock_write(s);
	SESSRELE(ss);

	if (is_pgrp_leader)
		return;

	pg_free(pg_id);
}

/*
 * Delete session - called from SESSRELE when s_count becomes zero.
 */
void
sessdelete(struct session *ss)
{
	/*
	 * We keep the pgrp with the same id as the session in
	 * order to stop a process being given the same pid.
	 * Since the pgrp holds a reference to the session, it
	 * must be a 'zombie' pgrp by now.
	 */

	pg_free(ss->s_sid);

	FREE(ss, M_SESSION);
}

/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 *
 * Call with proclist_lock held.
 */
void
fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp;
	struct session *mysession = pgrp->pg_session;
	struct proc *child;

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	hispgrp = p->p_pptr->p_pgrp;
	if (hispgrp != pgrp && hispgrp->pg_session == mysession) {
		if (entering)
			pgrp->pg_jobc++;
		else if (--pgrp->pg_jobc == 0)
			orphanpg(pgrp);
	}

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(child, &p->p_children, p_sibling) {
		hispgrp = child->p_pgrp;
		if (hispgrp != pgrp && hispgrp->pg_session == mysession &&
		    !P_ZOMBIE(child)) {
			if (entering)
				hispgrp->pg_jobc++;
			else if (--hispgrp->pg_jobc == 0)
				orphanpg(hispgrp);
		}
	}
}

/*
 * A process group has become orphaned;
 * if there are any stopped processes in the group,
 * hang-up all process in that group.
 *
 * Call with proclist_lock held.
 */
static void
orphanpg(struct pgrp *pg)
{
	struct proc *p;

	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		if (p->p_stat == SSTOP) {
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				psignal(p, SIGHUP);
				psignal(p, SIGCONT);
			}
			return;
		}
	}
}

/* mark process as suid/sgid, reset some values to defaults */
void
p_sugid(struct proc *p)
{
	struct plimit *newlim;

	p->p_flag |= P_SUGID;
	/* reset what needs to be reset in plimit */
	if (p->p_limit->pl_corename != defcorename) {
		if (p->p_limit->p_refcnt > 1 &&
		    (p->p_limit->p_lflags & PL_SHAREMOD) == 0) {
			newlim = limcopy(p->p_limit);
			limfree(p->p_limit);
			p->p_limit = newlim;
		}
		free(p->p_limit->pl_corename, M_TEMP);
		p->p_limit->pl_corename = defcorename;
	}
}

#ifdef DDB
#include <ddb/db_output.h>
void pidtbl_dump(void);
void
pidtbl_dump(void)
{
	struct pid_table *pt;
	struct proc *p;
	struct pgrp *pgrp;
	int id;

	db_printf("pid table %p size %x, next %x, last %x\n",
	    pid_table, pid_tbl_mask+1,
	    next_free_pt, last_free_pt);
	for (pt = pid_table, id = 0; id <= pid_tbl_mask; id++, pt++) {
		p = pt->pt_proc;
		if (!P_VALID(p) && !pt->pt_pgrp)
			continue;
		db_printf("  id %x: ", id);
		if (P_VALID(p))
			db_printf("proc %p id %d (0x%x) %s\n",
			    p, p->p_pid, p->p_pid, p->p_comm);
		else
			db_printf("next %x use %x\n",
			    P_NEXT(p) & pid_tbl_mask,
			    P_NEXT(p) & ~pid_tbl_mask);
		if ((pgrp = pt->pt_pgrp)) {
			db_printf("\tsession %p, sid %d, count %d, login %s\n",
			    pgrp->pg_session, pgrp->pg_session->s_sid,
			    pgrp->pg_session->s_count,
			    pgrp->pg_session->s_login);
			db_printf("\tpgrp %p, pg_id %d, pg_jobc %d, members %p\n",
			    pgrp, pgrp->pg_id, pgrp->pg_jobc,
			    pgrp->pg_members.lh_first);
			for (p = pgrp->pg_members.lh_first; p != 0;
			    p = p->p_pglist.le_next) {
				db_printf("\t\tpid %d addr %p pgrp %p %s\n",
				    p->p_pid, p, p->p_pgrp, p->p_comm);
			}
		}
	}
}
#endif /* DDB */

#ifdef KSTACK_CHECK_MAGIC
#include <sys/user.h>

#define	KSTACK_MAGIC	0xdeadbeaf

/* XXX should be per process basis? */
int kstackleftmin = KSTACK_SIZE;
int kstackleftthres = KSTACK_SIZE / 8;	/* warn if remaining stack is
					   less than this */

void
kstack_setup_magic(const struct lwp *l)
{
	u_int32_t *ip;
	u_int32_t const *end;

	KASSERT(l != NULL);
	KASSERT(l != &lwp0);

	/*
	 * fill the entire stack with the magic number
	 * so that later modifications to it can be detected.
	 */
	ip = (u_int32_t *)KSTACK_LOWEST_ADDR(l);
	end = (u_int32_t *)((caddr_t)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE);
	for (; ip < end; ip++) {
		*ip = KSTACK_MAGIC;
	}
}

void
kstack_check_magic(const struct lwp *l)
{
	u_int32_t const *ip, *end;
	int stackleft;

	KASSERT(l != NULL);

	/* don't check proc0 */ /*XXX*/
	if (l == &lwp0)
		return;

#ifdef __MACHINE_STACK_GROWS_UP
	/* stack grows upwards (eg. hppa) */
	ip = (u_int32_t *)((caddr_t)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE);
	end = (u_int32_t *)KSTACK_LOWEST_ADDR(l);
	for (ip--; ip >= end; ip--)
		if (*ip != KSTACK_MAGIC)
			break;

	stackleft = (caddr_t)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE - (caddr_t)ip;
#else /* __MACHINE_STACK_GROWS_UP */
	/* stack grows downwards (eg. i386) */
	ip = (u_int32_t *)KSTACK_LOWEST_ADDR(l);
	end = (u_int32_t *)((caddr_t)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE);
	for (; ip < end; ip++)
		if (*ip != KSTACK_MAGIC)
			break;

	stackleft = (caddr_t)ip - KSTACK_LOWEST_ADDR(l);
#endif /* __MACHINE_STACK_GROWS_UP */

	if (kstackleftmin > stackleft) {
		kstackleftmin = stackleft;
		if (stackleft < kstackleftthres)
			printf("warning: kernel stack left %d bytes "
			    "(pid %u:lid %u)\n", stackleft,
			    (u_int)l->l_proc->p_pid, (u_int)l->l_lid);
	}

	if (stackleft <= 0) {
		panic("magic on the top of kernel stack changed for "
		    "pid %u, lid %u: maybe kernel stack overflow",
		    (u_int)l->l_proc->p_pid, (u_int)l->l_lid);
	}
}
#endif /* KSTACK_CHECK_MAGIC */