/*
 * Copyright (c) 1993 Paul Kranenburg
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Paul Kranenburg.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$Id: procfs_subr.c,v 1.3.2.1 1993/11/14 22:35:01 mycroft Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/resourcevar.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <sys/kinfo.h>
#include <sys/kinfo_proc.h>

#include <miscfs/procfs/procfs.h>
#include <miscfs/procfs/pfsnode.h>

#include <machine/vmparam.h>

/*
 * Get process address map (PIOCGMAP)
 */
int
pfs_vmmap(procp, pfsp, pmapp)
struct proc	*procp;
struct pfsnode	*pfsp;
struct procmap	*pmapp;
{
	int		error = 0;
	vm_map_t	map;
	vm_map_entry_t	entry;
	struct procmap	prmap;

	map = &procp->p_vmspace->vm_map;
	vm_map_lock(map);
	entry = map->header.next;

	while (entry != &map->header) {
		if (entry->is_a_map) {
			/*
			 * Share map: report each entry of the submap
			 * instead of the parent entry, matching the
			 * count returned by PIOCNMAP.
			 */
			vm_map_t	submap = entry->object.share_map;
			vm_map_entry_t	subentry;

			vm_map_lock(submap);
			subentry = submap->header.next;
			while (subentry != &submap->header) {
				prmap.vaddr = subentry->start;
				prmap.size = subentry->end - subentry->start;
				prmap.offset = subentry->offset;
				prmap.prot = subentry->protection;
				error = copyout(&prmap, pmapp, sizeof(prmap));
				if (error)
					break;
				pmapp++;
				subentry = subentry->next;
			}
			vm_map_unlock(submap);
			if (error)
				break;
		} else {
			prmap.vaddr = entry->start;
			prmap.size = entry->end - entry->start;
			prmap.offset = entry->offset;
			prmap.prot = entry->protection;
			error = copyout(&prmap, pmapp, sizeof(prmap));
			if (error)
				break;
			pmapp++;
		}
		entry = entry->next;
	}

	vm_map_unlock(map);
	return error;
}

/*
 * Count number of VM entries of process (PIOCNMAP)
 */
int
pfs_vm_nentries(procp, pfsp)
struct proc	*procp;
struct pfsnode	*pfsp;
{
	int		count = 0;
	vm_map_t	map;
	vm_map_entry_t	entry;

	map = &procp->p_vmspace->vm_map;
	vm_map_lock(map);
	entry = map->header.next;

	while (entry != &map->header) {
		if (entry->is_a_map)
			count += entry->object.share_map->nentries;
		else
			count++;
		entry = entry->next;
	}

	vm_map_unlock(map);
	return count;
}

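/*
 * Illustrative userland sketch (kept under #if 0, not compiled here): how a
 * client might pair PIOCNMAP and PIOCGMAP.  The "/proc/<pid>" path, the
 * visibility of <miscfs/procfs/pfsnode.h> to userland and the exact ioctl
 * argument encodings are assumptions; the vaddr/size/offset/prot members of
 * struct procmap are taken from pfs_vmmap() above.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <miscfs/procfs/pfsnode.h>	/* struct procmap, PIOCNMAP, PIOCGMAP */

int
print_map(path)
	char	*path;			/* e.g. "/proc/123" (layout assumed) */
{
	int		fd, i, nentries;
	struct procmap	*pm;

	if ((fd = open(path, O_RDONLY)) < 0)
		return -1;
	/* Ask for the entry count first, then size the buffer to match. */
	if (ioctl(fd, PIOCNMAP, &nentries) < 0) {
		close(fd);
		return -1;
	}
	pm = (struct procmap *)malloc(nentries * sizeof(*pm));
	if (pm == NULL) {
		close(fd);
		return -1;
	}
	if (ioctl(fd, PIOCGMAP, pm) < 0) {
		free(pm);
		close(fd);
		return -1;
	}
	for (i = 0; i < nentries; i++)
		printf("0x%lx 0x%lx off 0x%lx prot %d\n",
			(u_long)pm[i].vaddr, (u_long)pm[i].size,
			(u_long)pm[i].offset, pm[i].prot);
	free(pm);
	close(fd);
	return 0;
}
#endif
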
/*
 * Map process mapped file to file descriptor (PIOCGMAPFD)
 */
int
pfs_vmfd(procp, pfsp, vmfdp, p)
struct proc	*procp;
struct pfsnode	*pfsp;
struct vmfd	*vmfdp;
struct proc	*p;
{
	int		rv;
	vm_map_t	map;
	vm_offset_t	addr;
	vm_size_t	size;
	vm_prot_t	prot, maxprot;
	vm_inherit_t	inherit;
	boolean_t	shared;
	vm_object_t	object;
	vm_offset_t	objoff;
	struct vnode	*vp;
	struct file	*fp;
	extern struct fileops	vnops;

	map = &procp->p_vmspace->vm_map;

	addr = vmfdp->vaddr;
	rv = vm_region(map, &addr, &size, &prot, &maxprot,
			&inherit, &shared, &object, &objoff);

	if (rv != KERN_SUCCESS)
		return EINVAL;

	/* Follow the shadow chain down to the object that has the pager */
	while (object != NULL && object->pager == NULL)
		object = object->shadow;

	if (object == NULL || object->pager == NULL
			/* Nobody seems to care || !object->pager_ready */ )
		return ENOENT;

	if (object->pager->pg_type != PG_VNODE)
		return ENOENT;

	/* We have a vnode pager, allocate file descriptor */
	vp = (struct vnode *)object->pager->pg_handle;
	if (VOP_ACCESS(vp, VREAD, p->p_ucred, p)) {
		rv = EACCES;
		goto out;
	}
	rv = falloc(p, &fp, &vmfdp->fd);
	if (rv)
		goto out;

	VREF(vp);
	fp->f_type = DTYPE_VNODE;
	fp->f_ops = &vnops;
	fp->f_data = (caddr_t)vp;
	fp->f_flag = FREAD;

out:
	vm_object_unlock(object);
	return rv;
}

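/*
 * Illustrative userland sketch (kept under #if 0, not compiled here): turning
 * a mapped address into a read-only descriptor on the backing vnode with
 * PIOCGMAPFD.  The process-node descriptor and the exact ioctl argument
 * encoding are assumptions; struct vmfd's vaddr/fd members are taken from
 * pfs_vmfd() above.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <miscfs/procfs/pfsnode.h>	/* struct vmfd, PIOCGMAPFD */

int
fd_for_mapping(procfd, vaddr)
	int	procfd;			/* open descriptor on the process node */
	u_long	vaddr;			/* address inside some mapping */
{
	struct vmfd	vmfd;

	vmfd.vaddr = vaddr;
	if (ioctl(procfd, PIOCGMAPFD, &vmfd) < 0)
		return -1;		/* e.g. ENOENT if not vnode-backed */
	return vmfd.fd;			/* opened FREAD on the backing vnode */
}
#endif
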
/*
 * Vnode op for reading/writing.
 */
/* ARGSUSED */
int
pfs_doio(vp, uio, ioflag, cred)
	struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct pfsnode	*pfsp = VTOPFS(vp);
	struct proc	*procp;
	int		error = 0;
	long		n, on;

#ifdef DEBUG
	if (pfs_debug)
		printf("pfs_doio(%s): vp 0x%x, proc %x\n",
			uio->uio_rw == UIO_READ ? "R" : "W", vp, uio->uio_procp);
#endif

#ifdef DIAGNOSTIC
	if (vp->v_type != VPROC)
		panic("pfs_doio vtype");
#endif
	procp = pfsp->pfs_pid ? pfind(pfsp->pfs_pid) : &proc0;
	if (!procp)
		return ESRCH;

	if (procp->p_flag & SSYS)
		return EACCES;

	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)
		return (EINVAL);

	do { /* One page at a time */
		int		rv;
		vm_map_t	map;
		vm_offset_t	offset;
		vm_size_t	size;
		vm_prot_t	oldprot = 0, prot, maxprot;
		vm_inherit_t	inherit;
		boolean_t	shared;
		vm_object_t	object;
		vm_offset_t	objoff;
		vm_page_t	m;
		vm_offset_t	kva;

		on = uio->uio_offset - trunc_page(uio->uio_offset);
		n = MIN(PAGE_SIZE - on, uio->uio_resid);

		/* Map page into kernel space */

		map = &procp->p_vmspace->vm_map;
#if 0
		vm_map_print(map, 1);
#endif

		offset = trunc_page(uio->uio_offset);

		rv = vm_region(map, &offset, &size, &prot, &maxprot,
				&inherit, &shared, &object, &objoff);
		if (rv != KERN_SUCCESS)
			return EINVAL;

		vm_object_unlock(object);

		/* Temporarily grant write access for a write request */
		if (uio->uio_rw == UIO_WRITE && (prot & VM_PROT_WRITE) == 0) {
			oldprot = prot;
			prot |= VM_PROT_WRITE;
			rv = vm_protect(map, offset, PAGE_SIZE, FALSE, prot);
			if (rv != KERN_SUCCESS)
				return EPERM;
		}
		/* Just fault the page */
		rv = vm_fault(map, offset, prot, FALSE);
		if (rv != KERN_SUCCESS)
			return EFAULT;

		/* Look up again as vm_fault() may have inserted a shadow object */
		rv = vm_region(map, &offset, &size, &prot, &maxprot,
				&inherit, &shared, &object, &objoff);
		if (rv != KERN_SUCCESS)
			return EINVAL;

		/* Now find the page */
		/* XXX hope it's still there, should we have wired it? */
		m = vm_page_lookup(object, objoff);
		if (m == NULL)
			return ESRCH;

		kva = kmem_alloc_wait(kernel_map, PAGE_SIZE);

		pmap_enter(vm_map_pmap(kernel_map), kva, VM_PAGE_TO_PHYS(m),
			VM_PROT_DEFAULT, TRUE);

		error = uiomove((caddr_t)(kva + on), (int)n, uio);

		pmap_remove(vm_map_pmap(kernel_map), kva, kva + PAGE_SIZE);
		kmem_free_wakeup(kernel_map, kva, PAGE_SIZE);
		if (oldprot) {
			/* Restore the original protection */
			rv = vm_protect(map, offset, PAGE_SIZE, FALSE, oldprot);
			if (rv != KERN_SUCCESS)
				return EPERM;
		}

	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}

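/*
 * Illustrative userland sketch (kept under #if 0, not compiled here):
 * pfs_doio() above backs plain read()/write() on the process node, so
 * peeking at a remote address is just lseek() + read().  The "/proc/<pid>"
 * path is an assumption about the filesystem layout, not something this
 * file defines.
 */
#if 0
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>

ssize_t
peek(path, addr, buf, len)
	char	*path;			/* e.g. "/proc/123" (layout assumed) */
	off_t	addr;			/* target address in the process */
	void	*buf;
	size_t	len;
{
	int	fd;
	ssize_t	n;

	if ((fd = open(path, O_RDONLY)) < 0)
		return -1;
	if (lseek(fd, addr, SEEK_SET) == -1) {
		close(fd);
		return -1;
	}
	n = read(fd, buf, len);		/* pfs_doio() copies it page by page */
	close(fd);
	return n;
}
#endif
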
#if 00
int
pfs_map(procp, kva, rw, offset)
struct proc	*procp;
int		rw;
vm_offset_t	*kva, offset;
{
	int		rv;
	vm_map_t	map;
	vm_size_t	size;
	vm_prot_t	prot, maxprot;
	vm_inherit_t	inherit;
	boolean_t	shared;
	vm_object_t	object;
	vm_offset_t	objoff;
	vm_page_t	m;

	map = &procp->p_vmspace->vm_map;
#if 0
	vm_map_print(map, 1);
#endif

	offset = trunc_page(offset);

	rv = vm_region(map, &offset, &size, &prot, &maxprot,
			&inherit, &shared, &object, &objoff);
	if (rv != KERN_SUCCESS)
		return EINVAL;

	vm_object_unlock(object);

	if (rw == UIO_WRITE && (prot & VM_PROT_WRITE) == 0) {
		prot |= VM_PROT_WRITE;
		rv = vm_protect(map, offset, PAGE_SIZE, FALSE, prot);
		if (rv != KERN_SUCCESS)
			return EPERM;
	}
	/* Just fault page */
	rv = vm_fault(map, offset, prot, FALSE);
	if (rv != KERN_SUCCESS)
		return EFAULT;

	/* Look up again as vm_fault() may have inserted a shadow object */
	rv = vm_region(map, &offset, &size, &prot, &maxprot,
			&inherit, &shared, &object, &objoff);
	if (rv != KERN_SUCCESS)
		return EINVAL;

	m = vm_page_lookup(object, objoff);
	if (m == NULL)
		return ESRCH;

	*kva = kmem_alloc_wait(kernel_map, PAGE_SIZE);

	pmap_enter(vm_map_pmap(kernel_map), *kva, VM_PAGE_TO_PHYS(m),
			VM_PROT_DEFAULT, TRUE);

	return 0;
}

int
pfs_unmap(procp, kva)
struct proc	*procp;
vm_offset_t	kva;
{
	pmap_remove(vm_map_pmap(kernel_map), kva, kva + PAGE_SIZE);
	kmem_free_wakeup(kernel_map, kva, PAGE_SIZE);
	return 0;
}
#endif