/*	$Id: uvm_device.c,v 1.1 1998/02/05 06:25:10 mrg Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_device.c: the device pager.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <sys/syscallargs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

UVMHIST_DECL(maphist);

/*
 * private global data structure
 *
 * we keep a list of active device objects in the system.
 */

LIST_HEAD(udv_list_struct, uvm_device);
static struct udv_list_struct udv_list;
#if NCPU > 1
static simple_lock_data_t udv_lock;
#endif
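
/*
 * locking note: udv_lock protects both udv_list and the
 * UVM_DEVICE_HOLD/UVM_DEVICE_WANTED bits in each object's u_flags.
 * the per-object vmobjlock protects only that object's reference count.
 */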

/*
 * functions
 */

static void		udv_init __P((void));
struct uvm_object	*udv_attach __P((void *, vm_prot_t));
static void		udv_reference __P((struct uvm_object *));
static void		udv_detach __P((struct uvm_object *));
static int		udv_fault __P((struct uvm_faultinfo *, vm_offset_t,
				       vm_page_t *, int, int, vm_fault_t,
				       vm_prot_t, int));
static boolean_t	udv_flush __P((struct uvm_object *, vm_offset_t,
				       vm_offset_t, int));
static int		udv_asyncget __P((struct uvm_object *, vm_offset_t,
					  int));
static int		udv_put __P((struct uvm_object *, vm_page_t *,
				     int, boolean_t));

/*
 * master pager structure
 */

struct uvm_pagerops uvm_deviceops = {
	udv_init,
	udv_attach,
	udv_reference,
	udv_detach,
	udv_fault,
	udv_flush,
	NULL,		/* no get function since we have udv_fault */
	udv_asyncget,
	udv_put,
	NULL,		/* no cluster function */
	NULL,		/* no put cluster function */
	NULL,		/* no share protect.  no share maps for us */
	NULL,		/* no AIO-DONE function since no async i/o */
	NULL,		/* no releasepg function since no normal pages */
};
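
/*
 * UVM invokes pager operations indirectly through an object's pgops
 * pointer.  as a rough (hypothetical; slot names assumed from the
 * uvm_pagerops layout above) sketch, dropping a reference looks like:
 *
 *	if (uobj->pgops->pgo_detach)
 *		uobj->pgops->pgo_detach(uobj);
 *
 * for device objects, pgops points at uvm_deviceops, so this dispatches
 * to udv_detach().
 */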

/*
 * the ops!
 */

/*
 * udv_init
 *
 * init pager private data structures.
 */

void udv_init()

{
	LIST_INIT(&udv_list);
	simple_lock_init(&udv_lock);
}

/*
 * udv_attach
 *
 * get a VM object that is associated with a device.  allocate a new
 * one if needed.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
 */
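
/*
 * a (hypothetical) caller mapping a character device might use this
 * roughly as follows, where "dev" is the dev_t being mapped:
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = udv_attach((void *) &dev, prot);
 *	if (uobj == NULL)
 *		return(EINVAL);		<- device doesn't support mmap
 *
 * (a sketch only; the real call site lives in the uvm mmap code.)
 */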

struct uvm_object *udv_attach(arg, accessprot)

void *arg;
vm_prot_t accessprot;

{
	dev_t device = *((dev_t *) arg);
	struct uvm_device *udv, *lcv;
	int (*mapfn) __P((dev_t, int, int));
	UVMHIST_FUNC("udv_attach"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(device=0x%x)", device,0,0,0);

	/*
	 * before we do anything, ensure this device supports mmap
	 */

	mapfn = cdevsw[major(device)].d_mmap;
	if (mapfn == NULL ||
	    mapfn == (int (*) __P((dev_t, int, int))) enodev ||
	    mapfn == (int (*) __P((dev_t, int, int))) nullop)
		return(NULL);

	/*
	 * keep looping until we get it
	 */

	while (1) {

		/*
		 * first, attempt to find it on the main list
		 */

		simple_lock(&udv_lock);
		for (lcv = udv_list.lh_first ; lcv != NULL ; lcv = lcv->u_list.le_next) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * got it on main list.  put a hold on it and unlock udv_lock.
		 */

		if (lcv) {

			/*
			 * if someone else has a hold on it, sleep and start over again.
			 */

			if (lcv->u_flags & UVM_DEVICE_HOLD) {
				lcv->u_flags |= UVM_DEVICE_WANTED;
				UVM_UNLOCK_AND_WAIT(lcv, &udv_lock, FALSE, "udv_attach",0);
				continue;
			}

			lcv->u_flags |= UVM_DEVICE_HOLD;	/* we are now holding it */
			simple_unlock(&udv_lock);
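
			/*
			 * with the hold set, a concurrent udv_detach()
			 * cannot free the object while we drop udv_lock
			 * and take the object's own vmobjlock below.
			 */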

			/*
			 * bump reference count, unhold, return.
			 */

			simple_lock(&lcv->u_obj.vmobjlock);
			lcv->u_obj.uo_refs++;
			simple_unlock(&lcv->u_obj.vmobjlock);

			simple_lock(&udv_lock);
			if (lcv->u_flags & UVM_DEVICE_WANTED)
				wakeup(lcv);
			lcv->u_flags &= ~(UVM_DEVICE_WANTED|UVM_DEVICE_HOLD);
			simple_unlock(&udv_lock);
			return(&lcv->u_obj);
		}

		/*
		 * did not find it on main list.  need to malloc a new one.
		 */

		simple_unlock(&udv_lock);
		/* NOTE: we could sleep in the following malloc() */
		MALLOC(udv, struct uvm_device *, sizeof(*udv), M_TEMP, M_WAITOK);
		simple_lock(&udv_lock);

		/*
		 * now we have to double check to make sure no one added it to the
		 * list while we were sleeping...
		 */

		for (lcv = udv_list.lh_first ; lcv != NULL ; lcv = lcv->u_list.le_next) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * did we lose a race to someone else?  free our memory and retry.
		 */

		if (lcv) {
			simple_unlock(&udv_lock);
			FREE(udv, M_TEMP);
			continue;
		}

		/*
		 * we have it!  init the data structures, add to list and return.
		 */

		simple_lock_init(&udv->u_obj.vmobjlock);
		udv->u_obj.pgops = &uvm_deviceops;
		TAILQ_INIT(&udv->u_obj.memq);	/* not used, but be safe */
		udv->u_obj.uo_npages = 0;
		udv->u_obj.uo_refs = 1;
		udv->u_flags = 0;
		udv->u_device = device;
		LIST_INSERT_HEAD(&udv_list, udv, u_list);
		simple_unlock(&udv_lock);

		return(&udv->u_obj);

	}	/* while(1) loop */

	/*NOTREACHED*/
}

/*
 * udv_reference
 *
 * add a reference to a VM object.  Note that the reference count must
 * already be one (the passed in reference) so there is no chance of the
 * udv being released or locked out here.
 *
 * => caller must call with object unlocked.
 */


static void udv_reference(uobj)

struct uvm_object *uobj;

{
	UVMHIST_FUNC("udv_reference"); UVMHIST_CALLED(maphist);

	simple_lock(&uobj->vmobjlock);
	uobj->uo_refs++;
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
	    uobj, uobj->uo_refs,0,0);
	simple_unlock(&uobj->vmobjlock);
}

/*
 * udv_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void udv_detach(uobj)

struct uvm_object *uobj;

{
	struct uvm_device *udv = (struct uvm_device *) uobj;
	UVMHIST_FUNC("udv_detach"); UVMHIST_CALLED(maphist);

	/*
	 * loop until done
	 */

	while (1) {
		simple_lock(&uobj->vmobjlock);

		if (uobj->uo_refs > 1) {
			uobj->uo_refs--;		/* drop ref! */
			simple_unlock(&uobj->vmobjlock);
			UVMHIST_LOG(maphist,"  <- done, uobj=0x%x, ref=%d",
			    uobj,uobj->uo_refs,0,0);
			return;
		}

#ifdef DIAGNOSTIC
		if (uobj->uo_npages || uobj->memq.tqh_first)
			panic("udv_detach: pages in a device object?");
#endif

		/*
		 * now lock udv_lock
		 */
		simple_lock(&udv_lock);

		/*
		 * is it being held?  if so, wait until others are done.
		 */
		if (udv->u_flags & UVM_DEVICE_HOLD) {

			/*
			 * want it
			 */
			udv->u_flags |= UVM_DEVICE_WANTED;
			simple_unlock(&uobj->vmobjlock);
			UVM_UNLOCK_AND_WAIT(udv, &udv_lock, FALSE, "udv_detach",0);
			continue;
		}

		/*
		 * got it!  nuke it now.
		 */

		LIST_REMOVE(udv, u_list);
		if (udv->u_flags & UVM_DEVICE_WANTED)
			wakeup(udv);
		FREE(udv, M_TEMP);
		break;		/* DONE! */

	}	/* while (1) loop */

	UVMHIST_LOG(maphist,"  <- done, freed uobj=0x%x", uobj,0,0,0);
	return;
}


/*
 * udv_flush
 *
 * flush pages out of a uvm object.  a no-op for devices.
 */

static boolean_t udv_flush(uobj, start, stop, flags)

struct uvm_object *uobj;
vm_offset_t start, stop;
int flags;

{
	return(TRUE);
}

/*
 * udv_fault: non-standard fault routine for device "pages"
 *
 * => rather than having a "get" function, we have a fault routine
 *	since we don't return vm_pages, we need full control over the
 *	pmap_enter()ing of the mapping
 * => all the usual fault data structures are locked by the caller
 *	(i.e. maps(read), amap (if any), uobj)
 * => on return, we unlock all fault data structures
 * => flags: PGO_ALLPAGES: get all of the pages
 *	     PGO_LOCKED: fault data structures are locked
 *	XXX: currently PGO_LOCKED is always required ... consider removing
 *	it as a flag
 * => NOTE: vaddr is the VA of pps[0] in ufi->entry, _NOT_ pps[centeridx]
 */

static int udv_fault(ufi, vaddr, pps, npages, centeridx, fault_type,
	access_type, flags)

struct uvm_faultinfo *ufi;
vm_offset_t vaddr;
vm_page_t *pps;
int npages, centeridx, flags;
vm_fault_t fault_type;
vm_prot_t access_type;

{
	struct uvm_object *uobj;
	struct uvm_device *udv;
	vm_offset_t curr_offset, curr_va, paddr;
	struct vm_map_entry *entry = ufi->entry;
	int lcv, retval, mdpgno;
	dev_t device;
	int (*mapfn) __P((dev_t, int, int));
	UVMHIST_FUNC("udv_fault"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"  flags=%d", flags,0,0,0);

	/*
	 * XXX: !PGO_LOCKED calls are currently not allowed (or used)
	 */

	if ((flags & PGO_LOCKED) == 0)
		panic("udv_fault: !PGO_LOCKED fault");

	/*
	 * get object pointers and map function first: uobj must be valid
	 * before we can pass it to uvmfault_unlockall() below.
	 */
	uobj = entry->object.uvm_obj;
	udv = (struct uvm_device *) uobj;
	device = udv->u_device;
	mapfn = cdevsw[major(device)].d_mmap;

	/*
	 * we do not allow device mappings to be mapped copy-on-write,
	 * so we kill any attempt to do so here.
	 */

	if (UVM_ET_ISCOPYONWRITE(entry)) {
		UVMHIST_LOG(maphist, "<- failed -- COW entry (etype=0x%x)",
		    entry->etype, 0,0,0);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
		return(VM_PAGER_ERROR);
	}

	/*
	 * now we must determine the offset in udv to use and the VA to use
	 * for pmap_enter.  note that we always pmap_enter() in the
	 * ufi->orig_map's pmap, but that our ufi->entry may be from some
	 * other map (in the submap/sharemap case).  so we must convert the
	 * VA from ufi->map to ufi->orig_map (note that in many cases these
	 * maps are the same).  note that ufi->orig_rvaddr and ufi->rvaddr
	 * refer to the same physical page.
	 */
	/* udv offset = (offset from start of entry) + entry's offset */
	curr_offset = (vaddr - entry->start) + entry->offset;
	/* pmap va = orig_va + (offset of vaddr from translated va) */
	curr_va = ufi->orig_rvaddr + (vaddr - ufi->rvaddr);
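	/*
	 * for example (hypothetical numbers): if entry->start is 0x2000,
	 * entry->offset is 0x1000, and vaddr is 0x3000, then curr_offset
	 * is (0x3000 - 0x2000) + 0x1000 = 0x2000 bytes into the device.
	 */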

	/*
	 * loop over the page range entering in as needed
	 */

	retval = VM_PAGER_OK;
	for (lcv = 0 ; lcv < npages ;
	    lcv++, curr_offset += PAGE_SIZE, curr_va += PAGE_SIZE) {

		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
			continue;

		if (pps[lcv] == PGO_DONTCARE)
			continue;

		/*
		 * ask the device's d_mmap function for this page.  check
		 * the raw return value for failure before converting it
		 * with pmap_phys_address (the converted address need not
		 * compare equal to -1).
		 */
		mdpgno = (*mapfn)(device, (int)curr_offset, access_type);
		if (mdpgno == -1) {
			retval = VM_PAGER_ERROR;
			break;
		}
		paddr = pmap_phys_address(mdpgno);

		UVMHIST_LOG(maphist, "  MAPPING: device: pm=0x%x, va=0x%x, pa=0x%x, at=%d",
		    ufi->orig_map->pmap, curr_va, paddr, access_type);
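		/*
		 * enter the mapping in the faulting process' pmap; the
		 * final argument (0) means the mapping is not wired.
		 */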
		pmap_enter(ufi->orig_map->pmap, curr_va, paddr, access_type, 0);

	}

	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
	return(retval);
}

/*
 * udv_asyncget: start async I/O to bring pages into ram
 *
 * => caller must lock object(???XXX: see if this is best)
 * => a no-op for devices
 */

static int udv_asyncget(uobj, offset, npages)

struct uvm_object *uobj;
vm_offset_t offset;
int npages;

{
	return(KERN_SUCCESS);
}

/*
 * udv_put: flush page data to backing store.
 *
 * => this function should never be called (since we never have any
 *    page structures to "put")
 */

static int udv_put(uobj, pps, npages, flags)

struct uvm_object *uobj;
struct vm_page **pps;
int npages, flags;

{
	panic("udv_put: trying to page out to a device!");
}