/*	$NetBSD: uvm_bio.c,v 1.81 2014/07/07 20:14:43 riastradh Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */
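
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * the typical usage pattern of this cache, as implemented by
 * ubc_uiomove() later in this file, is to map a window over a file
 * range, copy through it with uiomove(), and release it.  Assuming a
 * referenced uobj and a uio set up by the caller:
 *
 *	vsize_t bytelen = todo;
 *	void *win = ubc_alloc(uobj, off, &bytelen, advice, UBC_WRITE);
 *	error = uiomove(win, bytelen, uio);
 *	ubc_release(win, flags);
 */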

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.81 2014/07/07 20:14:43 riastradh Exp $");

#include "opt_uvmhist.h"
#include "opt_ubc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
		    int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) \
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
	    ubc_object.hashmask)

#define UBC_QUEUE(offset) \
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) & \
	    (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u) \
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))

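/*
 * Worked example (editor's note; PAGE_SHIFT and UBC_WINSHIFT are
 * machine-dependent, the values below are only illustrative): with
 * PAGE_SHIFT == 12 and ubc_winshift == 13, each window covers
 * ubc_winsize == 8192 bytes, i.e. two pages.  The third ubc_map
 * (index 2) then maps the kernel virtual range
 *
 *	UBC_UMAP_ADDR(&ubc_object.umap[2])
 *	    == ubc_object.kva + (2 << 13)
 *	    == ubc_object.kva + 16384 .. ubc_object.kva + 24575
 */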

#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map {
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
	LIST_ENTRY(ubc_map)	list;		/* per-object list */
};

static struct ubc_object {
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */
} ubc_object;

const struct uvm_pagerops ubc_pager = {
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif

#if defined(UBC_STATS)

#define	UBC_EVCNT_DEFINE(name) \
struct evcnt ubc_evcnt_##name = \
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
#define	UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++

#else /* defined(UBC_STATS) */

#define	UBC_EVCNT_DEFINE(name)	/* nothing */
#define	UBC_EVCNT_INCR(name)	/* nothing */

#endif /* defined(UBC_STATS) */

UBC_EVCNT_DEFINE(wincachehit)
UBC_EVCNT_DEFINE(wincachemiss)
UBC_EVCNT_DEFINE(faultbusy)

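/*
 * Editor's note (illustrative, not part of the original source): with
 * UBC_STATS defined, UBC_EVCNT_DEFINE(wincachehit) above expands to a
 * statically attached event counter,
 *
 *	struct evcnt ubc_evcnt_wincachehit =
 *	    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", "wincachehit");
 *	EVCNT_ATTACH_STATIC(ubc_evcnt_wincachehit);
 *
 * which is bumped via UBC_EVCNT_INCR(wincachehit) and can be inspected
 * with "vmstat -e" as ubc/wincachehit.
 */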

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	uvm_obj_init(&ubc_object.uobj, &ubc_pager, true, UVM_OBJ_KERN);

	ubc_object.umap = kmem_zalloc(ubc_nwins * sizeof(struct ubc_map),
	    KM_SLEEP);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");

	if (ubc_winshift < PAGE_SHIFT) {
		ubc_winshift = PAGE_SHIFT;
	}
	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = kmem_alloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), KM_SLEEP);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
		    umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, true,
	    &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
	    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
}

void
ubchist_init(void)
{

	UVMHIST_INIT(ubchist, 300);
}
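
/*
 * Worked example (editor's note; UBC_NWINS and UBC_WINSHIFT are
 * machine-dependent tunables, the values here are hypothetical): with
 * ubc_nwins == 1024 and ubc_winshift == 13, the uvm_map() call above
 * reserves
 *
 *	ubc_nwins << ubc_winshift == 1024 * 8192 == 8 MB
 *
 * of kernel virtual space at ubc_object.kva for the mapping windows.
 */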

/*
 * ubc_fault_page: helper of ubc_fault to handle a single page.
 *
 * => Caller has UVM object locked.
 * => Caller will perform pmap_update().
 */

static inline int
ubc_fault_page(const struct uvm_faultinfo *ufi, const struct ubc_map *umap,
    struct vm_page *pg, vm_prot_t prot, vm_prot_t access_type, vaddr_t va)
{
	struct uvm_object *uobj;
	vm_prot_t mask;
	int error;
	bool rdonly;

	uobj = pg->uobject;
	KASSERT(mutex_owned(uobj->vmobjlock));

	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	KASSERT((pg->flags & PG_FAKE) == 0);
	if (pg->flags & PG_RELEASED) {
		mutex_enter(&uvm_pageqlock);
		uvm_pagefree(pg);
		mutex_exit(&uvm_pageqlock);
		return 0;
	}
	if (pg->loan_count != 0) {

		/*
		 * Avoid unneeded loan break, if possible.
		 */

		if ((access_type & VM_PROT_WRITE) == 0) {
			prot &= ~VM_PROT_WRITE;
		}
		if (prot & VM_PROT_WRITE) {
			struct vm_page *newpg;

			newpg = uvm_loanbreak(pg);
			if (newpg == NULL) {
				uvm_page_unbusy(&pg, 1);
				return ENOMEM;
			}
			pg = newpg;
		}
	}

	/*
	 * Note that a page whose backing store is partially allocated
	 * is marked as PG_RDONLY.
	 */

	KASSERT((pg->flags & PG_RDONLY) == 0 ||
	    (access_type & VM_PROT_WRITE) == 0 ||
	    pg->offset < umap->writeoff ||
	    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);

	rdonly = ((access_type & VM_PROT_WRITE) == 0 &&
	    (pg->flags & PG_RDONLY) != 0) ||
	    UVM_OBJ_NEEDS_WRITEFAULT(uobj);
	mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;

	error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
	    prot & mask, PMAP_CANFAIL | (access_type & mask));

	mutex_enter(&uvm_pageqlock);
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);
	pg->flags &= ~(PG_BUSY|PG_WANTED);
	UVM_PAGE_OWN(pg, NULL);

	return error;
}

/*
 * ubc_fault: fault routine for ubc mapping
 */

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT];
	int i, error, npages;
	vm_prot_t prot;

	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
	    va, ubc_offset, access_type, 0);

#ifdef DIAGNOSTIC
	if ((access_type & VM_PROT_WRITE) != 0) {
		if (slot_offset < trunc_page(umap->writeoff) ||
		    umap->writeoff + umap->writelen <= slot_offset) {
			panic("ubc_fault: out of range write");
		}
	}
#endif

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	mutex_enter(uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		kpause("ubc_fault", false, hz >> 2, NULL);
		goto again;
	}
	if (error) {
		return error;
	}

	/*
	 * For virtually-indexed, virtually-tagged caches we should avoid
	 * creating writable mappings when we do not absolutely need them,
	 * since the "compatible alias" trick does not work on such caches.
	 * Otherwise, we can always map the pages writable.
	 */

#ifdef PMAP_CACHE_VIVT
	prot = VM_PROT_READ | access_type;
#else
	prot = VM_PROT_READ | VM_PROT_WRITE;
#endif

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);

	/*
	 * Note: normally all returned pages would have the same UVM object.
	 * However, layered file-systems and e.g. tmpfs, may return pages
	 * which belong to underlying UVM object.  In such case, lock is
	 * shared amongst the objects.
	 */
	mutex_enter(uobj->vmobjlock);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		struct vm_page *pg;

		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
		error = ubc_fault_page(ufi, umap, pg, prot, access_type, va);
		if (error) {
			/*
			 * Flush (there might be pages entered), drop the lock,
			 * and perform uvm_wait().  Note: page will re-fault.
			 */
			pmap_update(ufi->orig_map->pmap);
			mutex_exit(uobj->vmobjlock);
			uvm_wait("ubc_fault");
			mutex_enter(uobj->vmobjlock);
		}
	}
	/* Must make VA visible before the unlock. */
	pmap_update(ufi->orig_map->pmap);
	mutex_exit(uobj->vmobjlock);

	return 0;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc: allocate a file mapping window
 */

void *
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
	    uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	mutex_enter(ubc_object.uobj.vmobjlock);
again:
	/*
	 * The UVM object is already referenced.
	 * Lock order: UBC object -> ubc_map::uobj.
	 */
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		struct uvm_object *oobj;

		UBC_EVCNT_INCR(wincachemiss);
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			kpause("ubc_alloc", false, hz >> 2,
			    ubc_object.uobj.vmobjlock);
			goto again;
		}

		va = UBC_UMAP_ADDR(umap);
		oobj = umap->uobj;

		/*
		 * Remove from old hash (if any), add to new hash.
		 */

		if (oobj != NULL) {
			/*
			 * Mapping must be removed before the list entry,
			 * since there is a race with ubc_purge().
			 */
			if (umap->flags & UMAP_MAPPING_CACHED) {
				umap->flags &= ~UMAP_MAPPING_CACHED;
				mutex_enter(oobj->vmobjlock);
				pmap_remove(pmap_kernel(), va,
				    va + ubc_winsize);
				pmap_update(pmap_kernel());
				mutex_exit(oobj->vmobjlock);
			}
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
		} else {
			KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		LIST_INSERT_HEAD(&uobj->uo_ubc, umap, list);
	} else {
		UBC_EVCNT_INCR(wincachehit);
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

	if (flags & UBC_WRITE) {
		KASSERTMSG(umap->writeoff == 0 && umap->writelen == 0,
		    "ubc_alloc: concurrent writes to uobj %p", uobj);
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	mutex_exit(ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);
		KASSERT(umap->refcount == 1);

		UBC_EVCNT_INCR(faultbusy);
again_faultbusy:
		mutex_enter(uobj->vmobjlock);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, sizeof(pgs));

		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];

			KASSERT(pg->uobject == uobj);
			if (pg->loan_count != 0) {
				mutex_enter(uobj->vmobjlock);
				if (pg->loan_count != 0) {
					pg = uvm_loanbreak(pg);
				}
				if (pg == NULL) {
					pmap_kremove(va, ubc_winsize);
					pmap_update(pmap_kernel());
					uvm_page_unbusy(pgs, npages);
					mutex_exit(uobj->vmobjlock);
					uvm_wait("ubc_alloc");
					goto again_faultbusy;
				}
				mutex_exit(uobj->vmobjlock);
				pgs[i] = pg;
			}
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	} else {
		KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	}

out:
	return (void *)(va + slot_offset);
}
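
/*
 * Editor's note (illustrative, not part of the original source):
 * UBC_FAULTBUSY is only legal together with UBC_WRITE (see the KDASSERT
 * above).  It enters all pages of the window up front with
 * pmap_kenter_pa(), so no ubc_fault() can occur on the window while it
 * is held; ubc_release() then unbusies the pages and tears the mappings
 * down.  A hedged caller sketch, where the caller intends to overwrite
 * the whole range:
 *
 *	vsize_t len = bytes;
 *	int flags = UBC_WRITE | UBC_FAULTBUSY;
 *	void *win = ubc_alloc(uobj, off, &len, UVM_ADV_NORMAL, flags);
 *	memset(win, 0, len);	-- safe: pages are busy and mapped
 *	ubc_release(win, flags);
 */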

/*
 * ubc_release: free a file mapping window.
 */

void
ubc_release(void *va, int flags)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	bool unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		const voff_t slot_offset = umap->writeoff;
		const voff_t endoff = umap->writeoff + umap->writelen;
		const voff_t zerolen = round_page(endoff) - endoff;
		const u_int npages = (round_page(endoff) -
		    trunc_page(slot_offset)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];

		KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);
		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		mutex_enter(uobj->vmobjlock);
		mutex_enter(&uvm_pageqlock);
		for (u_int i = 0; i < npages; i++) {
			paddr_t pa;
			bool rv __diagused;

			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		mutex_exit(&uvm_pageqlock);
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		uvm_page_unbusy(pgs, npages);
		mutex_exit(uobj->vmobjlock);
		unmapped = true;
	} else {
		unmapped = false;
	}

	mutex_enter(ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {
			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */
			mutex_enter(uobj->vmobjlock);
			pmap_remove(pmap_kernel(), umapva,
			    umapva + ubc_winsize);
			pmap_update(pmap_kernel());
			mutex_exit(uobj->vmobjlock);

			umap->flags &= ~UMAP_MAPPING_CACHED;
			LIST_REMOVE(umap, hash);
			LIST_REMOVE(umap, list);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	mutex_exit(ubc_object.uobj.vmobjlock);
}

/*
 * ubc_uiomove: move data to/from an object.
 */

int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	voff_t off;
	int error;

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;
		void *win;

		win = ubc_alloc(uobj, off, &bytelen, advice, flags);
		if (error == 0) {
			error = uiomove(win, bytelen, uio);
		}
		if (error != 0 && overwrite) {
			/*
			 * if we haven't initialized the pages yet,
			 * do it now.  it's safe to use memset here
			 * because we just mapped the pages above.
			 */
			printf("%s: error=%d\n", __func__, error);
			memset(win, 0, bytelen);
		}
		ubc_release(win, flags);
		off += bytelen;
		todo -= bytelen;
		if (error != 0 && (flags & UBC_PARTIALOK) != 0) {
			break;
		}
	}

	return error;
}
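
/*
 * Hedged usage sketch (editor's note; the vnode and size variable names
 * are hypothetical, not from this file): a file system read path would
 * call ubc_uiomove() with the vnode's UVM object and the residual
 * transfer length, e.g.
 *
 *	error = ubc_uiomove(&vp->v_uobj, uio,
 *	    MIN(uio->uio_resid, file_size - uio->uio_offset),
 *	    UVM_ADV_SEQUENTIAL, UBC_READ);
 */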

/*
 * ubc_zerorange: set a range of bytes in an object to zero.
 */

void
ubc_zerorange(struct uvm_object *uobj, off_t off, size_t len, int flags)
{
	void *win;

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		vsize_t bytelen = len;

		win = ubc_alloc(uobj, off, &bytelen, UVM_ADV_NORMAL, UBC_WRITE);
		memset(win, 0, bytelen);
		ubc_release(win, flags);

		off += bytelen;
		len -= bytelen;
	}
}
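
/*
 * Editor's note (illustrative, not from the original source): a typical
 * caller is a file system zeroing the tail of the last block past a new
 * end-of-file, e.g. on truncation, along the lines of
 *
 *	ubc_zerorange(&vp->v_uobj, new_size,
 *	    round_up_to_block(new_size) - new_size, UBC_UNMAP_FLAG(vp));
 *
 * where round_up_to_block() stands in for the file system's block
 * rounding helper.
 */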

/*
 * ubc_purge: disassociate ubc_map structures from an empty uvm_object.
 */

void
ubc_purge(struct uvm_object *uobj)
{
	struct ubc_map *umap;
	vaddr_t va;

	KASSERT(uobj->uo_npages == 0);

	/*
	 * Safe to check without lock held, as ubc_alloc() removes
	 * the mapping and list entry in the correct order.
	 */
	if (__predict_true(LIST_EMPTY(&uobj->uo_ubc))) {
		return;
	}
	mutex_enter(ubc_object.uobj.vmobjlock);
	while ((umap = LIST_FIRST(&uobj->uo_ubc)) != NULL) {
		KASSERT(umap->refcount == 0);
		for (va = 0; va < ubc_winsize; va += PAGE_SIZE) {
			KASSERT(!pmap_extract(pmap_kernel(),
			    va + UBC_UMAP_ADDR(umap), NULL));
		}
		LIST_REMOVE(umap, list);
		LIST_REMOVE(umap, hash);
		umap->flags &= ~UMAP_MAPPING_CACHED;
		umap->uobj = NULL;
	}
	mutex_exit(ubc_object.uobj.vmobjlock);
}