/*	$NetBSD: uvm_io.c,v 1.17 2001/11/10 07:37:00 lukem Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_io.c,v 1.1.2.2 1997/12/30 12:02:00 mrg Exp
 */

/*
 * uvm_io.c: uvm i/o ops
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_io.c,v 1.17 2001/11/10 07:37:00 lukem Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/uio.h>

#include <uvm/uvm.h>

/*
 * functions
 */

/*
 * uvm_io: perform I/O on a map
 *
 * => caller must have a reference to "map" so that it doesn't go away
 *    while we are working.
 */

int
uvm_io(map, uio)
	struct vm_map *map;
	struct uio *uio;
{
	vaddr_t baseva, endva, pageoffset, kva;
	vsize_t chunksz, togo, sz;
	struct vm_map_entry *dead_entries;
	int error;

	/*
	 * step 0: sanity checks and set up for copy loop.  start with a
	 * large chunk size.  if we have trouble finding vm space we will
	 * reduce it.
	 */

	if (uio->uio_resid == 0)
		return(0);
	togo = uio->uio_resid;

	baseva = (vaddr_t) uio->uio_offset;
	endva = baseva + (togo - 1);

	if (endva < baseva)	/* wrap around? */
		return(EIO);

	if (baseva >= VM_MAXUSER_ADDRESS)
		return(0);
	if (endva >= VM_MAXUSER_ADDRESS)
		/* EOF truncate */
		togo = togo - (endva - VM_MAXUSER_ADDRESS + 1);
	pageoffset = baseva & PAGE_MASK;
	baseva = trunc_page(baseva);
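	/*
	 * cap each pass at MAXBSIZE worth of kernel virtual space; the
	 * retry logic in step 2 below shrinks the chunk further if even
	 * that much KVA cannot be found.
	 */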
	chunksz = MIN(round_page(togo + pageoffset), MAXBSIZE);
	error = 0;

	/*
	 * step 1: main loop...  while we've got data to move
	 */

	for (/*null*/; togo > 0 ; pageoffset = 0) {

		/*
		 * step 2: extract mappings from the map into kernel_map
		 */

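		/*
		 * QREF takes cheap "quick" references on the backing
		 * objects, CONTIG requires the extracted range to appear as
		 * one contiguous kernel mapping, and FIXPROT sets the new
		 * mapping's protection to the source maxprot so the copy is
		 * not blocked by a lower current protection.
		 */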
		error = uvm_map_extract(map, baseva, chunksz, kernel_map, &kva,
		    UVM_EXTRACT_QREF | UVM_EXTRACT_CONTIG |
		    UVM_EXTRACT_FIXPROT);
		if (error) {

			/* retry with a smaller chunk... */
			if (error == ENOMEM && chunksz > PAGE_SIZE) {
				chunksz = trunc_page(chunksz / 2);
				if (chunksz < PAGE_SIZE)
					chunksz = PAGE_SIZE;
				continue;
			}

			break;
		}

		/*
		 * step 3: move a chunk of data
		 */

		sz = chunksz - pageoffset;
		if (sz > togo)
			sz = togo;
		error = uiomove((caddr_t) (kva + pageoffset), sz, uio);
		if (error)
			break;
		togo -= sz;
		baseva += chunksz;

		/*
		 * step 4: unmap the area of kernel memory
		 */

		vm_map_lock(kernel_map);
		uvm_unmap_remove(kernel_map, kva, kva + chunksz, &dead_entries);
		vm_map_unlock(kernel_map);
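		/*
		 * detaching the dead entries also drops the references
		 * gained by the UVM_EXTRACT_QREF extraction above.
		 */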
		if (dead_entries != NULL)
			uvm_unmap_detach(dead_entries, AMAP_REFALL);
	}
	return (error);
}
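
/*
 * Illustrative sketch (not part of the original file): one plausible way a
 * caller could drive uvm_io() to read bytes out of another process's
 * address space.  The helper name uvm_io_example_read() and its argument
 * list are hypothetical; the struct uio setup simply follows the usual
 * kernel conventions of this era (UIO_SYSSPACE destination buffer,
 * UIO_READ direction, uio_offset holding the target user virtual address).
 */
#if 0
static int
uvm_io_example_read(p, va, buf, len)
	struct proc *p;
	vaddr_t va;
	void *buf;
	size_t len;
{
	struct iovec iov;
	struct uio uio;

	iov.iov_base = buf;		/* kernel buffer to fill */
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)va;	/* user va to start reading at */
	uio.uio_resid = len;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_procp = NULL;		/* kernel-space transfer */

	/* caller must already hold a reference to the target map */
	return (uvm_io(&p->p_vmspace->vm_map, &uio));
}
#endif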