/*
 *	procfs_subr.c revision 1.1
 *
 *	%W% (Erasmus) %G%	- pk (at) cs.few.eur.nl
 */

#include "param.h"
#include "systm.h"
#include "time.h"
#include "kernel.h"
#include "ioctl.h"
#include "proc.h"
#include "buf.h"
#include "vnode.h"
#include "file.h"
#include "resourcevar.h"
#include "vm/vm.h"
#include "vm/vm_page.h"
#include "vm/vm_kern.h"
#include "kinfo.h"
#include "kinfo_proc.h"

#include "procfs.h"
#include "pfsnode.h"

#include "machine/vmparam.h"

/*
 * Get process address map (PIOCGMAP)
 */
int
pfs_vmmap(procp, pfsp, pmapp)
struct proc	*procp;
struct pfsnode	*pfsp;
struct procmap	*pmapp;
{
	int		error = 0;
	vm_map_t	map;
	vm_map_entry_t	entry;
	struct procmap	prmap;

	map = &procp->p_vmspace->vm_map;
	vm_map_lock(map);
	entry = map->header.next;

	/*
	 * XXX trusts the caller to have sized the buffer at `pmapp'
	 * via PIOCNMAP beforehand.
	 */
	while (entry != &map->header) {
		if (entry->is_a_map) {
			/*
			 * A share map: report each submap entry rather
			 * than the top-level entry, matching the count
			 * returned by pfs_vm_nentries() below.
			 */
			vm_map_t	submap = entry->object.share_map;
			vm_map_entry_t	subentry;

			vm_map_lock(submap);
			subentry = submap->header.next;
			while (subentry != &submap->header) {
				prmap.vaddr = subentry->start;
				prmap.size = subentry->end - subentry->start;
				prmap.offset = subentry->offset;
				prmap.prot = subentry->protection;
				error = copyout(&prmap, pmapp, sizeof(prmap));
				if (error)
					break;
				pmapp++;
				subentry = subentry->next;
			}
			vm_map_unlock(submap);
			if (error)
				break;
		} else {
			prmap.vaddr = entry->start;
			prmap.size = entry->end - entry->start;
			prmap.offset = entry->offset;
			prmap.prot = entry->protection;
			error = copyout(&prmap, pmapp, sizeof(prmap));
			if (error)
				break;
			pmapp++;
		}
		entry = entry->next;
	}

	vm_map_unlock(map);
	return error;
}

/*
 * Count number of VM entries of process (PIOCNMAP)
 */
int
pfs_vm_nentries(procp, pfsp)
struct proc	*procp;
struct pfsnode	*pfsp;
{
	int		count = 0;
	vm_map_t	map;
	vm_map_entry_t	entry;

	map = &procp->p_vmspace->vm_map;
	vm_map_lock(map);
	entry = map->header.next;

	while (entry != &map->header) {
		if (entry->is_a_map)
			/* Share maps contribute one slot per submap entry */
			count += entry->object.share_map->nentries;
		else
			count++;
		entry = entry->next;
	}

	vm_map_unlock(map);
	return count;
}
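
/*
 * A minimal userland sketch (not compiled here) of how a debugger might
 * drive the two ioctls above: PIOCNMAP to learn how many entries to
 * expect, then PIOCGMAP to fetch them.  The /proc/<pid> path and the
 * exact ioctl argument conventions are assumptions for illustration;
 * struct procmap is the one filled in by pfs_vmmap() above.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
/* plus the procfs header exporting PIOCNMAP/PIOCGMAP and struct procmap */

int
print_map(pid)
	int pid;
{
	char path[32];
	int fd, i, n;
	struct procmap *pm;

	(void)sprintf(path, "/proc/%d", pid);	/* assumed mount point */
	if ((fd = open(path, O_RDONLY)) < 0)
		return -1;
	if (ioctl(fd, PIOCNMAP, &n) < 0) {	/* number of map entries */
		(void)close(fd);
		return -1;
	}
	pm = (struct procmap *)malloc(n * sizeof(*pm));
	if (pm == NULL) {
		(void)close(fd);
		return -1;
	}
	if (ioctl(fd, PIOCGMAP, pm) < 0) {	/* fill in the entries */
		free(pm);
		(void)close(fd);
		return -1;
	}
	for (i = 0; i < n; i++)
		printf("va 0x%lx size 0x%lx prot %d\n",
		    (u_long)pm[i].vaddr, (u_long)pm[i].size, (int)pm[i].prot);
	free(pm);
	(void)close(fd);
	return 0;
}
#endif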

/*
 * Return a file descriptor referencing the file mapped at a given
 * address in the process (PIOCGMAPFD)
 */
int
pfs_vmfd(procp, pfsp, vmfdp, p)
struct proc	*procp;
struct pfsnode	*pfsp;
struct vmfd	*vmfdp;
struct proc	*p;
{
	int		rv;
	vm_map_t	map;
	vm_offset_t	addr;
	vm_size_t	size;
	vm_prot_t	prot, maxprot;
	vm_inherit_t	inherit;
	boolean_t	shared;
	vm_object_t	object;
	vm_offset_t	objoff;
	struct vnode	*vp;
	struct file	*fp;
	extern struct fileops	vnops;

	map = &procp->p_vmspace->vm_map;

	addr = vmfdp->vaddr;
	rv = vm_region(map, &addr, &size, &prot, &maxprot,
			&inherit, &shared, &object, &objoff);

	if (rv != KERN_SUCCESS)
		return EINVAL;

	/* Walk the shadow chain down to an object backed by a pager */
	while (object != NULL && object->pager == NULL)
		object = object->shadow;

	if (object == NULL)
		return ENOENT;

	if (object->pager == NULL
			/* Nobody seems to care || !object->pager_ready */ ) {
		rv = ENOENT;
		goto out;
	}

	if (object->pager->pg_type != PG_VNODE) {
		rv = ENOENT;
		goto out;
	}

	/* We have a vnode pager; hand out a read-only descriptor on it */
	vp = (struct vnode *)object->pager->pg_handle;
	if (VOP_ACCESS(vp, VREAD, p->p_ucred, p)) {
		rv = EACCES;
		goto out;
	}
	rv = falloc(p, &fp, &vmfdp->fd);
	if (rv)
		goto out;

	VREF(vp);
	fp->f_type = DTYPE_VNODE;
	fp->f_ops = &vnops;
	fp->f_data = (caddr_t)vp;
	fp->f_flag = FREAD;

out:
	vm_object_unlock(object);
	return rv;
}
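
/*
 * Userland sketch (not compiled here) of PIOCGMAPFD: given an address
 * inside a file-backed mapping of the target, get back a read-only
 * descriptor onto the mapped vnode, e.g. to read symbols straight from
 * the executable.  struct vmfd usage (vaddr in, fd out) follows
 * pfs_vmfd() above; the /proc descriptor, header and ioctl argument
 * conventions are assumptions, as in the earlier example.
 */
#if 0
int
map_fd(procfd, va)
	int procfd;
	u_long va;
{
	struct vmfd vmfd;

	vmfd.vaddr = va;		/* address inside the mapping */
	if (ioctl(procfd, PIOCGMAPFD, &vmfd) < 0)
		return -1;
	return vmfd.fd;			/* opened FREAD on the mapped file */
}
#endif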

/*
 * Vnode op for reading/writing a process address space.
 * The uio offset is interpreted as a virtual address in the target.
 */
/* ARGSUSED */
int
pfs_doio(vp, uio, ioflag, cred)
	struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct pfsnode	*pfsp = VTOPFS(vp);
	struct proc	*procp;
	int		error = 0;
	long		n, on;

#ifdef DEBUG
	if (pfs_debug)
		printf("pfs_doio(%s): vp 0x%x, proc %x\n",
			uio->uio_rw == UIO_READ ? "R" : "W", vp, uio->uio_procp);
#endif

#ifdef DIAGNOSTIC
	if (vp->v_type != VPROC)
		panic("pfs_doio vtype");
#endif
	/* pid 0 denotes the swapper (proc0) */
	procp = pfsp->pfs_pid ? pfind(pfsp->pfs_pid) : &proc0;
	if (!procp)
		return ESRCH;

	/* Don't touch system processes */
	if (procp->p_flag & SSYS)
		return EACCES;

	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)
		return (EINVAL);

	do { /* One page at a time */
		int		rv;
		vm_map_t	map;
		vm_offset_t	offset;
		vm_size_t	size;
		vm_prot_t	oldprot = 0, prot, maxprot;
		vm_inherit_t	inherit;
		boolean_t	shared;
		vm_object_t	object;
		vm_offset_t	objoff;
		vm_page_t	m;
		vm_offset_t	kva;

		/* on: offset within the page; n: bytes to move this pass */
		on = uio->uio_offset - trunc_page(uio->uio_offset);
		n = MIN(PAGE_SIZE - on, uio->uio_resid);

		/* Map page into kernel space */

		map = &procp->p_vmspace->vm_map;
#if 0
		vm_map_print(map, 1);
#endif

		offset = trunc_page(uio->uio_offset);

		rv = vm_region(map, &offset, &size, &prot, &maxprot,
				&inherit, &shared, &object, &objoff);
		if (rv != KERN_SUCCESS)
			return EINVAL;

		vm_object_unlock(object);

		/* Writing a read-only mapping: temporarily force write access */
		if (uio->uio_rw == UIO_WRITE && (prot & VM_PROT_WRITE) == 0) {
			oldprot = prot;
			prot |= VM_PROT_WRITE;
			rv = vm_protect(map, offset, PAGE_SIZE, FALSE, prot);
			if (rv != KERN_SUCCESS)
				return EPERM;
		}
		/* Just fault the page in */
		rv = vm_fault(map, offset, prot, FALSE);
		if (rv != KERN_SUCCESS)
			return EFAULT;

		/* Look up again, as vm_fault() may have inserted a shadow object */
		rv = vm_region(map, &offset, &size, &prot, &maxprot,
				&inherit, &shared, &object, &objoff);
		if (rv != KERN_SUCCESS)
			return EINVAL;

		/* Now find the page */
		/* XXX hope it's still there, should we have wired it? */
		m = vm_page_lookup(object, objoff);
		vm_object_unlock(object);
		if (m == NULL)
			return ESRCH;

		/* Borrow a kernel virtual address to copy through */
		kva = kmem_alloc_wait(kernel_map, PAGE_SIZE);

		pmap_enter(vm_map_pmap(kernel_map), kva, VM_PAGE_TO_PHYS(m),
			VM_PROT_DEFAULT, TRUE);

		error = uiomove((caddr_t)(kva + on), (int)n, uio);

		pmap_remove(vm_map_pmap(kernel_map), kva, kva + PAGE_SIZE);
		kmem_free_wakeup(kernel_map, kva, PAGE_SIZE);

		/* Restore the original protection if we relaxed it above */
		if (oldprot) {
			rv = vm_protect(map, offset, PAGE_SIZE, FALSE, oldprot);
			if (rv != KERN_SUCCESS)
				return EPERM;
		}

	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}
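
/*
 * pfs_doio() above is what a plain read(2)/write(2) on the process file
 * ends up in, with the file offset taken as a target virtual address.
 * A sketch (not compiled here) of a debugger-style peek built on that;
 * the descriptor is an open /proc/<pid> file as in the earlier examples:
 */
#if 0
int
peek(procfd, va, buf, len)
	int procfd;
	off_t va;
	char *buf;
	int len;
{
	if (lseek(procfd, va, SEEK_SET) == -1)	/* offset == target vaddr */
		return -1;
	return read(procfd, buf, len);
}
#endif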

/*
 * Disabled: factored-out versions of the page map/unmap code used
 * inline in pfs_doio() above.
 */
#if 00
int
pfs_map(procp, kva, rw, offset)
struct proc	*procp;
int		rw;
vm_offset_t	*kva, offset;
{
	int		rv;
	vm_map_t	map;
	vm_size_t	size;
	vm_prot_t	prot, maxprot;
	vm_inherit_t	inherit;
	boolean_t	shared;
	vm_object_t	object;
	vm_offset_t	objoff;
	vm_page_t	m;

	map = &procp->p_vmspace->vm_map;
#if 0
	vm_map_print(map, 1);
#endif

	offset = trunc_page(offset);

	rv = vm_region(map, &offset, &size, &prot, &maxprot,
			&inherit, &shared, &object, &objoff);
	if (rv != KERN_SUCCESS)
		return EINVAL;

	vm_object_unlock(object);

	if (rw == UIO_WRITE && (prot & VM_PROT_WRITE) == 0) {
		prot |= VM_PROT_WRITE;
		rv = vm_protect(map, offset, PAGE_SIZE, FALSE, prot);
		if (rv != KERN_SUCCESS)
			return EPERM;
	}
	/* Just fault the page in */
	rv = vm_fault(map, offset, prot, FALSE);
	if (rv != KERN_SUCCESS)
		return EFAULT;

	/* Look up again, as vm_fault() may have inserted a shadow object */
	rv = vm_region(map, &offset, &size, &prot, &maxprot,
			&inherit, &shared, &object, &objoff);
	if (rv != KERN_SUCCESS)
		return EINVAL;

	m = vm_page_lookup(object, objoff);
	if (m == NULL)
		return ESRCH;

	*kva = kmem_alloc_wait(kernel_map, PAGE_SIZE);

	pmap_enter(vm_map_pmap(kernel_map), *kva, VM_PAGE_TO_PHYS(m),
			VM_PROT_DEFAULT, TRUE);

	return 0;
}

void
pfs_unmap(procp, kva)
struct proc	*procp;
vm_offset_t	kva;
{
	pmap_remove(vm_map_pmap(kernel_map), kva, kva + PAGE_SIZE);
	kmem_free_wakeup(kernel_map, kva, PAGE_SIZE);
}
#endif