/*	$NetBSD: kern_ras.c,v 1.30.2.1 2008/05/10 23:49:04 wrstuden Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Gregory McGarry, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_ras.c,v 1.30.2.1 2008/05/10 23:49:04 wrstuden Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/ras.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/xcall.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

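/* Pool from which struct ras list entries are allocated. */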
POOL_INIT(ras_pool, sizeof(struct ras), 0, 0, 0, "raspl",
    &pool_allocator_nointr, IPL_NONE);

#define MAX_RAS_PER_PROC	16

u_int ras_per_proc = MAX_RAS_PER_PROC;

#ifdef DEBUG
int ras_debug = 0;
#define DPRINTF(x)	if (ras_debug) printf x
#else
#define DPRINTF(x)	/* nothing */
#endif

/*
 * Force all CPUs through cpu_switchto(), waiting until complete.
 * Context switching will drain the write buffer on the calling
 * CPU.
 */
static void
ras_sync(void)
{

	/* No need to sync if exiting or single threaded. */
	if (curproc->p_nlwps > 1 && ncpu > 1) {
#ifdef NO_SOFTWARE_PATENTS
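		/*
		 * A broadcast no-op cross-call is serviced by every
		 * CPU; waiting for it to complete guarantees that each
		 * CPU has context switched, and so is no longer inside
		 * ras_lookup() against a stale list, before we return.
		 */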
		uint64_t where;
		where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
		xc_wait(where);
#else
		/*
		 * Assumptions:
		 *
		 * o preemption is disabled by the thread in
		 *   ras_lookup().
		 * o proc::p_raslist is only inspected with
		 *   preemption disabled.
		 * o ras_lookup() plus loads reordered in advance
		 *   will take no longer than 1/8s to complete.
		 */
		const int delta = hz >> 3;
		int target = hardclock_ticks + delta;
		do {
			kpause("ras", false, delta, NULL);
		} while (hardclock_ticks < target);
#endif
	}
}

/*
 * Check the specified address to see if it is within the
 * sequence.  If it is found, we return the restart address,
 * otherwise we return -1.  If we do perform a restart, we
 * log the hit when RAS debugging is enabled.
 *
 * No locking required: we disable preemption and ras_sync()
 * guarantees that individual entries are valid while we still
 * have visibility of them.
 */
void *
ras_lookup(struct proc *p, void *addr)
{
	struct ras *rp;
	void *startaddr;
	lwp_t *l;

	startaddr = (void *)-1;
	l = curlwp;

	KPREEMPT_DISABLE(l);
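	/*
	 * Both bounds are exclusive: an interrupt at ras_startaddr
	 * itself needs no restart, and ras_endaddr is the first
	 * address past the sequence.
	 */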
	for (rp = p->p_raslist; rp != NULL; rp = rp->ras_next) {
		if (addr > rp->ras_startaddr && addr < rp->ras_endaddr) {
			startaddr = rp->ras_startaddr;
			DPRINTF(("RAS hit: p=%p %p\n", p, addr));
			break;
		}
	}
	KPREEMPT_ENABLE(l);

	return startaddr;
}

/*
 * During a fork, we copy all of the sequences from parent p1 to
 * the child p2.
 *
 * No locking required as the parent must be paused.
 */
int
ras_fork(struct proc *p1, struct proc *p2)
{
	struct ras *rp, *nrp;

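	/*
	 * Head insertion leaves the child's list in reverse order
	 * relative to the parent; lookups do not depend on order,
	 * so this is harmless.
	 */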
	for (rp = p1->p_raslist; rp != NULL; rp = rp->ras_next) {
		nrp = pool_get(&ras_pool, PR_WAITOK);
		nrp->ras_startaddr = rp->ras_startaddr;
		nrp->ras_endaddr = rp->ras_endaddr;
		nrp->ras_next = p2->p_raslist;
		p2->p_raslist = nrp;
	}

	DPRINTF(("ras_fork: p1=%p, p2=%p\n", p1, p2));

	return 0;
}

/*
 * Nuke all sequences for this process.
 */
int
ras_purgeall(void)
{
	struct ras *rp, *nrp;
	proc_t *p;

	p = curproc;

	mutex_enter(&p->p_auxlock);
	if ((rp = p->p_raslist) != NULL) {
		p->p_raslist = NULL;
		ras_sync();
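		/*
		 * No CPU can still be traversing the detached list,
		 * so the entries can now be freed.
		 */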
		for (; rp != NULL; rp = nrp) {
			nrp = rp->ras_next;
			pool_put(&ras_pool, rp);
		}
	}
	mutex_exit(&p->p_auxlock);

	return 0;
}

#if defined(__HAVE_RAS)

/*
 * Install the new sequence.  If it already exists, return
 * an error.
 */
static int
ras_install(void *addr, size_t len)
{
	struct ras *rp;
	struct ras *newrp;
	void *endaddr;
	int nras, error;
	proc_t *p;

	endaddr = (char *)addr + len;

	if (addr < (void *)VM_MIN_ADDRESS ||
	    endaddr > (void *)VM_MAXUSER_ADDRESS)
		return (EINVAL);

	if (len <= 0)
		return (EINVAL);

	newrp = pool_get(&ras_pool, PR_WAITOK);
	newrp->ras_startaddr = addr;
	newrp->ras_endaddr = endaddr;
	error = 0;
	nras = 0;
	p = curproc;

	mutex_enter(&p->p_auxlock);
	for (rp = p->p_raslist; rp != NULL; rp = rp->ras_next) {
		if (++nras >= ras_per_proc) {
			error = EINVAL;
			break;
		}
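		/* Reject a sequence that overlaps an existing one. */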
		if (addr < rp->ras_endaddr && endaddr > rp->ras_startaddr) {
			error = EEXIST;
			break;
		}
	}
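	/*
	 * rp == NULL means the scan found no conflict: link the new
	 * entry at the head and synchronize before unlocking, so that
	 * every CPU sees a consistent list.
	 */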
	if (rp == NULL) {
		newrp->ras_next = p->p_raslist;
		p->p_raslist = newrp;
		ras_sync();
		mutex_exit(&p->p_auxlock);
	} else {
		mutex_exit(&p->p_auxlock);
		pool_put(&ras_pool, newrp);
	}

	return error;
}

/*
 * Nuke the specified sequence.  Both address and len must
 * match, otherwise we return an error.
 */
static int
ras_purge(void *addr, size_t len)
{
	struct ras *rp, **link;
	void *endaddr;
	proc_t *p;

	endaddr = (char *)addr + len;
	p = curproc;

	mutex_enter(&p->p_auxlock);
	link = &p->p_raslist;
	for (rp = *link; rp != NULL; link = &rp->ras_next, rp = *link) {
		if (addr == rp->ras_startaddr && endaddr == rp->ras_endaddr)
			break;
	}
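	/*
	 * If a match was found, unlink it and synchronize before
	 * freeing, so that no CPU can still be referencing the entry
	 * from ras_lookup().
	 */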
	if (rp != NULL) {
		*link = rp->ras_next;
		ras_sync();
		mutex_exit(&p->p_auxlock);
		pool_put(&ras_pool, rp);
		return 0;
	} else {
		mutex_exit(&p->p_auxlock);
		return ESRCH;
	}
}

#endif /* defined(__HAVE_RAS) */

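/*
 * Illustrative sketch (not part of the kernel): from userland this
 * interface is driven through rasctl(2).  A program would bracket a
 * small load-modify-store window with labels and register it before
 * first use, roughly:
 *
 *	extern char ras_start[], ras_end[];	hypothetical labels
 *
 *	if (rasctl(ras_start, (size_t)(ras_end - ras_start),
 *	    RAS_INSTALL) == -1)
 *		err(1, "rasctl");
 *
 * ras_start/ras_end are assumed labels around the sequence; the exact
 * setup is architecture and application specific.
 */
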
/*ARGSUSED*/
int
sys_rasctl(struct lwp *l, const struct sys_rasctl_args *uap, register_t *retval)
{

#if defined(__HAVE_RAS)
	/* {
		syscallarg(void *) addr;
		syscallarg(size_t) len;
		syscallarg(int) op;
	} */
	void *addr;
	size_t len;
	int op;
	int error;

	/*
	 * first, extract syscall args from the uap.
	 */

	addr = (void *)SCARG(uap, addr);
	len = (size_t)SCARG(uap, len);
	op = SCARG(uap, op);

	DPRINTF(("sys_rasctl: p=%p addr=%p, len=%ld, op=0x%x\n",
	    curproc, addr, (long)len, op));

	switch (op) {
	case RAS_INSTALL:
		error = ras_install(addr, len);
		break;
	case RAS_PURGE:
		error = ras_purge(addr, len);
		break;
	case RAS_PURGE_ALL:
		error = ras_purgeall();
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);

#else

	return (EOPNOTSUPP);

#endif

}