/*	$NetBSD: mm.c,v 1.22.16.1 2019/06/10 22:07:04 christos Exp $	*/

/*-
 * Copyright (c) 2002, 2008, 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christos Zoulas, Joerg Sonnenberger and Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Special /dev/{mem,kmem,zero,null,full} memory devices.
 */

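/*
 * Behaviour is selected by the device minor number: DEV_MEM (/dev/mem)
 * accesses physical memory, DEV_KMEM (/dev/kmem) accesses the kernel
 * virtual address space, DEV_NULL (/dev/null) discards writes and reads
 * as EOF, DEV_ZERO (/dev/zero) reads as zeroes and discards writes, and
 * DEV_FULL (/dev/full) reads as zeroes but fails writes with ENOSPC.
 */
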
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mm.c,v 1.22.16.1 2019/06/10 22:07:04 christos Exp $");

#include "opt_compat_netbsd.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <sys/termios.h>

#include <dev/mm.h>

#include <uvm/uvm_extern.h>

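/*
 * dev_zero_page: read-only page of zeroes backing /dev/zero reads.
 * dev_mem_lock: serializes the temporary mappings made for /dev/mem I/O.
 * dev_mem_addr: pre-allocated KVA for those mappings on platforms without
 * cache aliasing.
 */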
static void *		dev_zero_page	__read_mostly;
static kmutex_t		dev_mem_lock	__cacheline_aligned;
static vaddr_t		dev_mem_addr	__read_mostly;

static dev_type_open(mm_open);
static dev_type_read(mm_readwrite);
static dev_type_ioctl(mm_ioctl);
static dev_type_mmap(mm_mmap);

const struct cdevsw mem_cdevsw = {
	.d_open = mm_open,
	.d_close = nullclose,
	.d_read = mm_readwrite,
	.d_write = mm_readwrite,
	.d_ioctl = mm_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = mm_mmap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_MPSAFE
};

#ifdef pmax	/* XXX */
const struct cdevsw mem_ultrix_cdevsw = {
	.d_open = nullopen,
	.d_close = nullclose,
	.d_read = mm_readwrite,
	.d_write = mm_readwrite,
	.d_ioctl = mm_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = mm_mmap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_MPSAFE
};
#endif

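/*
 * mm_open: open the memory device.  Runs the machine-dependent open hook,
 * if any, and then marks the process as having a kernel memory device
 * open (PK_KMEM).
 */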
static int
mm_open(dev_t dev, int flag, int mode, struct lwp *l)
{
#ifdef __HAVE_MM_MD_OPEN
	int error;
	if ((error = mm_md_open(dev, flag, mode, l)) != 0)
		return error;
#endif
	l->l_proc->p_flag |= PK_KMEM;
	return 0;
}

/*
 * mm_init: initialize memory device driver.
 */
void
mm_init(void)
{
	vaddr_t pg;

	mutex_init(&dev_mem_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Read-only zero-page. */
	pg = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
	KASSERT(pg != 0);
	pmap_protect(pmap_kernel(), pg, pg + PAGE_SIZE, VM_PROT_READ);
	pmap_update(pmap_kernel());
	dev_zero_page = (void *)pg;

#ifndef __HAVE_MM_MD_CACHE_ALIASING
	/* KVA for mappings during I/O. */
	dev_mem_addr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_VAONLY|UVM_KMF_WAITVA);
	KASSERT(dev_mem_addr != 0);
#else
	dev_mem_addr = 0;
#endif
}

/*
 * dev_mem_getva: get a special virtual address.  If the architecture
 * requires it, allocate the VA according to the PA, which avoids
 * cache-aliasing issues.  Otherwise, use a constant, general mapping
 * address.
 */
static inline vaddr_t
dev_mem_getva(paddr_t pa, int color)
{
#ifdef __HAVE_MM_MD_CACHE_ALIASING
	return uvm_km_alloc(kernel_map, PAGE_SIZE,
	    color & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
#else
	return dev_mem_addr;
#endif
}

/*
 * dev_mem_relva: release the special virtual address, if it was allocated.
 */
static inline void
dev_mem_relva(paddr_t pa, vaddr_t va)
{
#ifdef __HAVE_MM_MD_CACHE_ALIASING
	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_VAONLY);
#else
	KASSERT(dev_mem_addr == va);
#endif
}

/*
 * dev_mem_readwrite: helper for the DEV_MEM (/dev/mem) case of R/W.
 */
static int
dev_mem_readwrite(struct uio *uio, struct iovec *iov)
{
	paddr_t paddr;
	vaddr_t vaddr;
	vm_prot_t prot;
	size_t len, offset;
	bool have_direct;
	int error;
	int color = 0;

	/* Check for wrap around: the offset must fit in a uintptr_t. */
	if ((uintptr_t)uio->uio_offset != uio->uio_offset) {
		return EFAULT;
	}
	paddr = uio->uio_offset & ~PAGE_MASK;
	prot = (uio->uio_rw == UIO_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;
	error = mm_md_physacc(paddr, prot);
	if (error) {
		return error;
	}
	offset = uio->uio_offset & PAGE_MASK;
	len = MIN(uio->uio_resid, PAGE_SIZE - offset);

#ifdef __HAVE_MM_MD_CACHE_ALIASING
	have_direct = mm_md_page_color(paddr, &color);
#else
	have_direct = true;
	color = 0;
#endif

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	/* Is the physical address directly mapped?  If so, return the VA. */
	if (have_direct)
		have_direct = mm_md_direct_mapped_phys(paddr, &vaddr);
#else
	vaddr = 0;
	have_direct = false;
#endif
	if (!have_direct) {
		/* Get a special virtual address. */
		const vaddr_t va = dev_mem_getva(paddr, color);

		/* Map the selected KVA to the physical address. */
		mutex_enter(&dev_mem_lock);
		pmap_kenter_pa(va, paddr, prot, 0);
		pmap_update(pmap_kernel());

		/* Perform I/O. */
		vaddr = va + offset;
		error = uiomove((void *)vaddr, len, uio);

		/* Unmap and flush before unlocking. */
		pmap_kremove(va, PAGE_SIZE);
		pmap_update(pmap_kernel());
		mutex_exit(&dev_mem_lock);

		/* "Release" the virtual address. */
		dev_mem_relva(paddr, va);
	} else {
		/* Direct map, just perform I/O. */
		vaddr += offset;
		error = uiomove((void *)vaddr, len, uio);
	}
	return error;
}

/*
 * dev_kmem_readwrite: helper for the DEV_KMEM (/dev/kmem) case of R/W.
 */
static int
dev_kmem_readwrite(struct uio *uio, struct iovec *iov)
{
	void *addr;
	size_t len, offset;
	vm_prot_t prot;
	int error;
	bool md_kva;

	/* Check for wrap around: the offset must fit in a uintptr_t. */
	addr = (void *)(intptr_t)uio->uio_offset;
	if ((uintptr_t)addr != uio->uio_offset) {
		return EFAULT;
	}
	/*
	 * Handle a non-page-aligned offset.
	 * Otherwise, operate on a page-by-page basis.
	 */
	offset = uio->uio_offset & PAGE_MASK;
	len = MIN(uio->uio_resid, PAGE_SIZE - offset);
	prot = (uio->uio_rw == UIO_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;

	md_kva = false;

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_IO
	paddr_t paddr;
	/* MD case: is this a directly mapped address? */
	if (mm_md_direct_mapped_io(addr, &paddr)) {
		/* If so, validate the physical address. */
		error = mm_md_physacc(paddr, prot);
		if (error) {
			return error;
		}
		md_kva = true;
	}
#endif
	if (!md_kva) {
		bool checked = false;

#ifdef __HAVE_MM_MD_KERNACC
		/* MD check for the address. */
		error = mm_md_kernacc(addr, prot, &checked);
		if (error) {
			return error;
		}
#endif
		/* UVM check for the address (unless MD said to skip it). */
		if (!checked && !uvm_kernacc(addr, len, prot)) {
			return EFAULT;
		}
	}
	error = uiomove(addr, len, uio);
	return error;
}

/*
 * dev_zero_readwrite: helper for the DEV_ZERO (/dev/zero) case of R/W.
 */
static inline int
dev_zero_readwrite(struct uio *uio, struct iovec *iov)
{
	size_t len;

	/* Nothing to do for the write case. */
	if (uio->uio_rw == UIO_WRITE) {
		uio->uio_resid = 0;
		return 0;
	}
	/*
	 * Read on a page-by-page basis; the caller will continue.
	 * Clamp the length appropriately for the single/last iteration.
	 */
	len = MIN(iov->iov_len, PAGE_SIZE);
	return uiomove(dev_zero_page, len, uio);
}

/*
 * mm_readwrite: general memory R/W function.
 */
static int
mm_readwrite(dev_t dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	int error;

#ifdef __HAVE_MM_MD_READWRITE
	/* If defined, there are extra MD cases. */
	switch (minor(dev)) {
	case DEV_MEM:
	case DEV_KMEM:
	case DEV_NULL:
	case DEV_FULL:
	case DEV_ZERO:
#if defined(COMPAT_16) && defined(__arm)
	case _DEV_ZERO_oARM:
#endif
		break;
	default:
		return mm_md_readwrite(dev, uio);
	}
#endif
	error = 0;
	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			/* Processed; next I/O vector. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			KASSERT(uio->uio_iovcnt >= 0);
			continue;
		}
		/* Helper functions will process on a page-by-page basis. */
		switch (minor(dev)) {
		case DEV_MEM:
			error = dev_mem_readwrite(uio, iov);
			break;
		case DEV_KMEM:
			error = dev_kmem_readwrite(uio, iov);
			break;
		case DEV_NULL:
			if (uio->uio_rw == UIO_WRITE) {
				uio->uio_resid = 0;
			}
			/* Break directly out of the loop. */
			return 0;
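		/*
		 * /dev/full: fail writes with ENOSPC; reads fall through
		 * to the /dev/zero path below.
		 */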
		case DEV_FULL:
			if (uio->uio_rw == UIO_WRITE) {
				return ENOSPC;
			}
#if defined(COMPAT_16) && defined(__arm)
			/* FALLTHROUGH */
		case _DEV_ZERO_oARM:
#endif
		/* FALLTHROUGH */
		case DEV_ZERO:
			error = dev_zero_readwrite(uio, iov);
			break;
		default:
			error = ENXIO;
			break;
		}
	}
	return error;
}

/*
 * mm_mmap: general mmap() handler.
 */
static paddr_t
mm_mmap(dev_t dev, off_t off, int acc)
{
	vm_prot_t prot;

#ifdef __HAVE_MM_MD_MMAP
	/* If defined, there are extra mmap() MD cases. */
	switch (minor(dev)) {
	case DEV_MEM:
	case DEV_KMEM:
	case DEV_NULL:
	case DEV_FULL:
#if defined(COMPAT_16) && defined(__arm)
	case _DEV_ZERO_oARM:
#endif
	case DEV_ZERO:
		break;
	default:
		return mm_md_mmap(dev, off, acc);
	}
#endif
	/*
	 * mmap() of /dev/null or /dev/full does not make sense, /dev/kmem
	 * is volatile and /dev/zero is handled in mmap() already.
	 */
	if (minor(dev) != DEV_MEM) {
		return -1;
	}

	prot = 0;
	if (acc & PROT_EXEC)
		prot |= VM_PROT_EXECUTE;
	if (acc & PROT_READ)
		prot |= VM_PROT_READ;
	if (acc & PROT_WRITE)
		prot |= VM_PROT_WRITE;

	/* Validate the physical address. */
	if (mm_md_physacc(off, prot) != 0) {
		return -1;
	}
	return off >> PGSHIFT;
}

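/*
 * Example (userland) sketch of mapping a page of physical memory through
 * /dev/mem; `pa` is a hypothetical page-aligned physical address that the
 * caller is privileged to access:
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/mem", O_RDWR);
 *	void *p = mmap(NULL, (size_t)sysconf(_SC_PAGESIZE),
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, (off_t)pa);
 */
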
static int
mm_ioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{

	switch (cmd) {
	case FIONBIO:
		/* We never block anyway. */
		return 0;

	case FIOSETOWN:
	case FIOGETOWN:
	case TIOCGPGRP:
	case TIOCSPGRP:
	case TIOCGETA:
		return ENOTTY;

	case FIOASYNC:
		if ((*(int *)data) == 0) {
			return 0;
		}
		/* FALLTHROUGH */
	default:
		return EOPNOTSUPP;
	}
}