/*	$NetBSD: uvm_bio.c,v 1.55 2007/02/21 23:00:12 thorpej Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */
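
/*
 * Overview (editor's summary, derived from the code below): UBC keeps a
 * cache of fixed-size kernel virtual windows onto uvm_objects (typically
 * vnodes), so that file system i/o can be done with ordinary memory
 * copies into a mapping of the file's pages instead of through private
 * buffers.  ubc_alloc() binds a window to an (object, offset) pair and
 * ubc_release() unbinds it, possibly leaving the mapping cached for
 * reuse; ubc_fault() fills windows with pages on demand.
 */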

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.55 2007/02/21 23:00:12 thorpej Exp $");

#include "opt_uvmhist.h"
#include "opt_ubc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

/*
 * local functions
 */

static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
			  int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */

#define UBC_HASH(uobj, offset) \
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset) \
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) & \
			     (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u) \
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))
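
/*
 * Worked example of the window arithmetic (values are illustrative only):
 * with ubc_winshift == 13 the window size is 0x2000 (8KB), so file offset
 * 0x5234 falls in the window whose umap_offset is 0x4000, at slot_offset
 * 0x1234 within it.  UBC_UMAP_ADDR() goes the other way, recovering a
 * window's KVA from the index of its ubc_map in the umap array.
 */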


#define UMAP_PAGES_LOCKED	0x0001	/* window's pages are entered and busy */
#define UMAP_MAPPING_CACHED	0x0002	/* idle window still has pmap mappings */

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;		/* access hint (UVM_ADV_*) for pgo_get */

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};
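
/*
 * An active ubc_map (refcount > 0) sits on the hash chain for its
 * (uobj, offset) pair; when the refcount drops to zero it goes onto an
 * inactive queue but stays hashed, so a later ubc_alloc() of the same
 * window can reuse it, possibly together with its cached pmap mappings.
 */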

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;

struct uvm_pagerops ubc_pager =
{
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif
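
/*
 * On platforms with PMAP_PREFER (virtually-indexed caches), there is one
 * inactive queue per cache color and UBC_QUEUE() picks the queue by file
 * offset, so a reused window tends to get a virtual address whose color
 * is compatible with the offset being mapped.
 */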

#if defined(UBC_STATS)

#define	UBC_EVCNT_DEFINE(name) \
struct evcnt ubc_evcnt_##name = \
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
#define	UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++

#else /* defined(UBC_STATS) */

#define	UBC_EVCNT_DEFINE(name)	/* nothing */
#define	UBC_EVCNT_INCR(name)	/* nothing */

#endif /* defined(UBC_STATS) */

UBC_EVCNT_DEFINE(wincachehit)
UBC_EVCNT_DEFINE(wincachemiss)

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */
	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	UVM_OBJ_INIT(&ubc_object.uobj, &ubc_pager, UVM_OBJ_KERN);

	ubc_object.umap = malloc(ubc_nwins * sizeof(struct ubc_map),
	    M_TEMP, M_NOWAIT);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");
	memset(ubc_object.umap, 0, ubc_nwins * sizeof(struct ubc_map));

	va = (vaddr_t)1L;
#ifdef PMAP_PREFER
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = malloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), M_TEMP, M_NOWAIT);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
		    umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, M_TEMP, M_NOWAIT,
	    &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
	    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault: fault routine for ubc mapping
 */

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	vm_prot_t prot;
	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
	    va, ubc_offset, access_type, 0);

#ifdef DIAGNOSTIC
	if ((access_type & VM_PROT_WRITE) != 0) {
		if (slot_offset < trunc_page(umap->writeoff) ||
		    umap->writeoff + umap->writelen <= slot_offset) {
			panic("ubc_fault: out of range write");
		}
	}
#endif

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	simple_lock(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

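	/*
	 * EAGAIN from the pager means "try again later";
	 * sleep a tick and restart the getpages.
	 */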
	if (error == EAGAIN) {
		tsleep(&lbolt, PVM, "ubc_fault", 0);
		goto again;
	}
	if (error) {
		return error;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		bool rdonly;
		vm_prot_t mask;

		/*
		 * for virtually-indexed, virtually-tagged caches we should
		 * avoid creating writable mappings when we don't absolutely
		 * need them, since the "compatible alias" trick doesn't work
		 * on such caches.  otherwise, we can always map the pages
		 * writable.
		 */

#ifdef PMAP_CACHE_VIVT
		prot = VM_PROT_READ | access_type;
#else
		prot = VM_PROT_READ | VM_PROT_WRITE;
#endif
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}

		uobj = pg->uobject;
		simple_lock(&uobj->vmobjlock);
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
			simple_unlock(&uobj->vmobjlock);
			continue;
		}
		if (pg->loan_count != 0) {

			/*
			 * avoid unneeded loan break if possible.
			 */

			if ((access_type & VM_PROT_WRITE) == 0)
				prot &= ~VM_PROT_WRITE;

			if (prot & VM_PROT_WRITE) {
				struct vm_page *newpg;

				newpg = uvm_loanbreak(pg);
				if (newpg == NULL) {
					uvm_page_unbusy(&pg, 1);
					simple_unlock(&uobj->vmobjlock);
					uvm_wait("ubc_loanbrk");
					continue;	/* will re-fault */
				}
				pg = newpg;
			}
		}

		/*
		 * note that a page whose backing store is partially allocated
		 * is marked as PG_RDONLY.
		 */

		rdonly = ((access_type & VM_PROT_WRITE) == 0 &&
		    (pg->flags & PG_RDONLY) != 0) ||
		    UVM_OBJ_NEEDS_WRITEFAULT(uobj);
		KASSERT((pg->flags & PG_RDONLY) == 0 ||
		    (access_type & VM_PROT_WRITE) == 0 ||
		    pg->offset < umap->writeoff ||
		    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);
		mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;
		error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
		    prot & mask, PMAP_CANFAIL | (access_type & mask));
		uvm_lock_pageq();
		uvm_pageactivate(pg);
		uvm_unlock_pageq();
		pg->flags &= ~(PG_BUSY|PG_WANTED);
		UVM_PAGE_OWN(pg, NULL);
		simple_unlock(&uobj->vmobjlock);
		if (error) {
			UVMHIST_LOG(ubchist, "pmap_enter fail %d",
			    error, 0, 0, 0);
			uvm_wait("ubc_pmfail");
			/* will refault */
		}
	}
	pmap_update(ufi->orig_map->pmap);
	return 0;
}

/*
 * local functions
 */

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}


/*
 * ubc interface functions
 */

/*
 * ubc_alloc: allocate a file mapping window
 */

void *
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
	    uobj, offset, *lenp, 0);

	KASSERT(*lenp > 0);
	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	/*
	 * the object is always locked here, so we don't need to add a ref.
	 */

again:
	simple_lock(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		UBC_EVCNT_INCR(wincachemiss);
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			simple_unlock(&ubc_object.uobj.vmobjlock);
			tsleep(&lbolt, PVM, "ubc_alloc", 0);
			goto again;
		}

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		va = UBC_UMAP_ADDR(umap);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
		}
	} else {
		UBC_EVCNT_INCR(wincachehit);
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

#ifdef DIAGNOSTIC
	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes uobj %p", uobj);
	}
#endif
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	simple_unlock(&ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

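	/*
	 * UBC_FAULTBUSY: fault in the window's pages now, enter them with
	 * pmap_kenter_pa() and leave them busy until ubc_release(), so the
	 * caller can touch the window without taking page faults (at the
	 * cost of keeping the pages pinned for the duration).
	 */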
	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);

		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
		memset(pgs, 0, sizeof(pgs));
		simple_lock(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pgs[i]),
			    VM_PROT_READ | VM_PROT_WRITE);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	}

out:
	return (void *)(va + slot_offset);
}
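
#if 0 /* example */
/*
 * A minimal sketch of how a file system read path typically drives
 * ubc_alloc()/ubc_release().  The vnode "vp" and uio arguments are
 * assumptions for illustration only and are not part of this file;
 * real callers also clamp bytelen to the file size and stop at EOF.
 * Note that a window covers at most ubc_winsize bytes, hence the loop.
 */
static int
ubc_example_read(struct vnode *vp, struct uio *uio)
{
	int error = 0;

	while (uio->uio_resid > 0 && error == 0) {
		vsize_t bytelen = uio->uio_resid;
		void *win;

		/* map a window over the current file offset... */
		win = ubc_alloc(&vp->v_uobj, uio->uio_offset, &bytelen,
		    UVM_ADV_SEQUENTIAL, UBC_READ);

		/* ...copy out of it (this may fault via ubc_fault)... */
		error = uiomove(win, bytelen, uio);

		/* ...and release the window for reuse. */
		ubc_release(win, 0);
	}
	return error;
}
#endif /* example */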

/*
 * ubc_release: free a file mapping window.
 */

void
ubc_release(void *va, int flags)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	bool unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		int slot_offset = umap->writeoff;
		int endoff = umap->writeoff + umap->writelen;
		int zerolen = round_page(endoff) - endoff;
		int npages = (int)(round_page(umap->writeoff + umap->writelen)
		    - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		paddr_t pa;
		int i;
		bool rv;

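		/*
		 * the write may not have reached the end of the last
		 * page, so zero the tail of the window out to the page
		 * boundary before the pages are unbusied.
		 */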
		if (zerolen) {
			memset((char *)umapva + endoff, 0, zerolen);
		}
		umap->flags &= ~UMAP_PAGES_LOCKED;
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		uvm_unlock_pageq();
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		simple_lock(&uobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		simple_unlock(&uobj->vmobjlock);
		unmapped = TRUE;
	} else {
		unmapped = FALSE;
	}

	simple_lock(&ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {

			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */

			pmap_remove(pmap_kernel(), umapva,
			    umapva + ubc_winsize);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	simple_unlock(&ubc_object.uobj.vmobjlock);
}


#if 0 /* notused */
/*
 * removing a range of mappings from the ubc mapping cache.
 */

void
ubc_flush(struct uvm_object *uobj, voff_t start, voff_t end)
{
	struct ubc_map *umap;
	vaddr_t va;
	UVMHIST_FUNC("ubc_flush"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p start 0x%lx end 0x%lx",
	    uobj, start, end, 0);

	simple_lock(&ubc_object.uobj.vmobjlock);
	for (umap = ubc_object.umap;
	     umap < &ubc_object.umap[ubc_nwins];
	     umap++) {

		if (umap->uobj != uobj || umap->offset < start ||
		    (umap->offset >= end && end != 0) ||
		    umap->refcount > 0) {
			continue;
		}

		/*
		 * remove from hash,
		 * move to head of inactive queue.
		 */

		va = (vaddr_t)(ubc_object.kva +
		    ((umap - ubc_object.umap) << ubc_winshift));
		pmap_remove(pmap_kernel(), va, va + ubc_winsize);

		LIST_REMOVE(umap, hash);
		umap->uobj = NULL;
		TAILQ_REMOVE(UBC_QUEUE(umap->offset), umap, inactive);
		TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap, inactive);
	}
	pmap_update(pmap_kernel());
	simple_unlock(&ubc_object.uobj.vmobjlock);
}
#endif /* notused */