/*	$NetBSD: uvm_object.h,v 1.29.4.1 2011/02/08 16:20:07 bouyer Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_object.h,v 1.1.2.2 1998/01/04 22:44:51 chuck Exp
 */

#ifndef _UVM_UVM_OBJECT_H_
#define _UVM_UVM_OBJECT_H_

/*
 * uvm_object.h
 */

#include <sys/rbtree.h>
#include <uvm/uvm_pglist.h>

/*
 * uvm_object: all that is left of mach objects.
 */

struct uvm_object {
	kmutex_t		vmobjlock;	/* lock on memq */
	const struct uvm_pagerops *pgops;	/* pager ops */
	struct pglist		memq;		/* pages in this object */
	int			uo_npages;	/* # of pages in memq */
	unsigned		uo_refs;	/* reference count */
	struct rb_tree		rb_tree;	/* tree of pages */
};
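
/*
 * Illustrative sketch (not compiled): callers are expected to hold
 * vmobjlock while touching memq, uo_npages or rb_tree.  The walk below
 * assumes struct vm_page links object pages through its "listq" entry,
 * as declared in <uvm/uvm_page.h>.
 */
#if 0
static int
count_object_pages(struct uvm_object *uobj)
{
	struct vm_page *pg;
	int n = 0;

	mutex_enter(&uobj->vmobjlock);		/* serialize access to memq */
	TAILQ_FOREACH(pg, &uobj->memq, listq)	/* pages owned by uobj */
		n++;
	KASSERT(n == uobj->uo_npages);		/* memq and uo_npages agree */
	mutex_exit(&uobj->vmobjlock);
	return n;
}
#endif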

/*
 * UVM_OBJ_KERN is a 'special' uo_refs value which indicates that the
 * object is a kernel memory object rather than a normal one (kernel
 * memory objects don't have reference counts -- they never die).
 *
 * this value is used to detect kernel object mappings at uvm_unmap()
 * time.  normally when an object is unmapped its pages eventually become
 * deactivated and then paged out and/or freed.  this is not useful
 * for kernel objects... when a kernel object is unmapped we always want
 * to free the resources associated with the mapping.  UVM_OBJ_KERN
 * allows us to decide which type of unmapping we want to do.
 */
#define UVM_OBJ_KERN		(-2)

#define UVM_OBJ_IS_KERN_OBJECT(uobj)					\
	((uobj)->uo_refs == UVM_OBJ_KERN)
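
/*
 * Illustrative sketch (not compiled): one way unmap-time code can use
 * UVM_OBJ_IS_KERN_OBJECT to pick between the two unmapping styles
 * described above.  The detach branch assumes the pgo_detach hook from
 * <uvm/uvm_pager.h>; the kernel-object branch is only a placeholder.
 */
#if 0
static void
drop_object_ref(struct uvm_object *uobj)
{

	if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
		/* kernel object: no refcount; free mapping resources now */
	} else if (uobj->pgops->pgo_detach != NULL) {
		(*uobj->pgops->pgo_detach)(uobj);	/* drop one reference */
	}
}
#endif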

#ifdef _KERNEL

extern const struct uvm_pagerops uvm_vnodeops;
extern const struct uvm_pagerops uvm_deviceops;
extern const struct uvm_pagerops ubc_pager;
extern const struct uvm_pagerops aobj_pager;

#define UVM_OBJ_IS_VNODE(uobj)						\
	((uobj)->pgops == &uvm_vnodeops)

#define UVM_OBJ_IS_DEVICE(uobj)						\
	((uobj)->pgops == &uvm_deviceops)

#define UVM_OBJ_IS_VTEXT(uobj)						\
	(UVM_OBJ_IS_VNODE(uobj) && uvn_text_p(uobj))

#define UVM_OBJ_IS_CLEAN(uobj)						\
	(UVM_OBJ_IS_VNODE(uobj) && uvn_clean_p(uobj))

/*
 * UVM_OBJ_NEEDS_WRITEFAULT: true if the uobj needs to detect modification.
 * (ie. wants to avoid writable user mappings.)
 *
 * XXX bad name
 */

#define UVM_OBJ_NEEDS_WRITEFAULT(uobj)					\
	(UVM_OBJ_IS_VNODE(uobj) && uvn_needs_writefault_p(uobj))

#define UVM_OBJ_IS_AOBJ(uobj)						\
	((uobj)->pgops == &aobj_pager)

extern const rb_tree_ops_t uvm_page_tree_ops;

#define UVM_OBJ_INIT(uobj, ops, refs)					\
	do {								\
		mutex_init(&(uobj)->vmobjlock, MUTEX_DEFAULT, IPL_NONE);\
		(uobj)->pgops = (ops);					\
		TAILQ_INIT(&(uobj)->memq);				\
		(uobj)->uo_npages = 0;					\
		(uobj)->uo_refs = (refs);				\
		rb_tree_init(&(uobj)->rb_tree, &uvm_page_tree_ops);	\
	} while (/* CONSTCOND */ 0)

#ifdef DIAGNOSTIC
#define UVM_OBJ_DESTROY(uobj)						\
	do {								\
		voff_t _xo = 0;						\
		void *_xn;						\
		mutex_destroy(&(uobj)->vmobjlock);			\
		_xn = rb_tree_find_node_geq(&(uobj)->rb_tree, &_xo);	\
		KASSERT(_xn == NULL);					\
	} while (/* CONSTCOND */ 0)
#else
#define UVM_OBJ_DESTROY(uobj)						\
	do {								\
		mutex_destroy(&(uobj)->vmobjlock);			\
	} while (/* CONSTCOND */ 0)
#endif /* DIAGNOSTIC */
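
/*
 * Illustrative sketch (not compiled): typical object life cycle built on
 * the macros above.  aobj_pager is used here purely as an example; real
 * callers pass the pager ops appropriate to the object they implement.
 */
#if 0
static void
example_object_lifecycle(void)
{
	struct uvm_object uobj;

	/* set up lock, pager ops, empty page list/tree and one reference */
	UVM_OBJ_INIT(&uobj, &aobj_pager, 1);

	/* ... the object can now own pages and be mapped ... */

	/* tear down; with DIAGNOSTIC this asserts the page tree is empty */
	UVM_OBJ_DESTROY(&uobj);
}
#endif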

#endif /* _KERNEL */

#endif /* _UVM_UVM_OBJECT_H_ */