/*	$NetBSD: locks_up.c,v 1.1.2.2 2010/05/30 05:18:06 rmind Exp $	*/

/*
 * Copyright (c) 2010 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Virtual uniprocessor rump kernel version of locks.  Since the entire
 * kernel is running on only one CPU in the system, there is no need
 * to perform slow cache-coherent MP locking operations.  This speeds
 * things up quite dramatically and is a good example of how two
 * disjoint kernels running simultaneously on an MP system can be
 * massively faster than a single kernel with fine-grained locking.
 */

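/*
 * The routines below implement the standard NetBSD mutex(9),
 * rwlock(9) and condvar(9) interfaces; only the implementation behind
 * them is uniprocessor-specific.  A minimal usage sketch:
 *
 *	kmutex_t mtx;
 *
 *	mutex_init(&mtx, MUTEX_DEFAULT, IPL_NONE);
 *	mutex_enter(&mtx);
 *	... critical section ...
 *	mutex_exit(&mtx);
 *	mutex_destroy(&mtx);
 */
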
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: locks_up.c,v 1.1.2.2 2010/05/30 05:18:06 rmind Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

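/*
 * Each kmutex_t stores only a pointer to a host-allocated struct upmtx,
 * which holds the real lock state: mutex_init() stashes the pointer and
 * UPMTX() fetches it back.  Since the rump kernel runs on a single
 * virtual CPU, holding that CPU is the interlock, and no atomic
 * operations are needed to inspect or update the fields.
 */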
struct upmtx {
	struct lwp *upm_owner;
	int upm_wanted;
	struct rumpuser_cv *upm_rucv;
};
#define UPMTX(mtx) struct upmtx *upm = *(struct upmtx **)mtx

static inline void
checkncpu(void)
{

	if (__predict_false(ncpu != 1))
		panic("UP lock implementation requires RUMP_NCPU == 1");
}

void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	struct upmtx *upm;

	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));
	checkncpu();

	/*
	 * XXX: pool_cache would be nice, but not easily possible,
	 * as pool cache init wants to call mutex_init() ...
	 */
	upm = rumpuser_malloc(sizeof(*upm), 1);
	memset(upm, 0, sizeof(*upm));
	rumpuser_cv_init(&upm->upm_rucv);

	/* stash only the pointer to our private structure in the kmutex_t */
	memcpy(mtx, &upm, sizeof(void *));
}

void
mutex_destroy(kmutex_t *mtx)
{
	UPMTX(mtx);

	KASSERT(upm->upm_owner == NULL);
	KASSERT(upm->upm_wanted == 0);
	rumpuser_cv_destroy(upm->upm_rucv);
	rumpuser_free(upm);
}

void
mutex_enter(kmutex_t *mtx)
{
	UPMTX(mtx);

	/* fastpath? */
	if (mutex_tryenter(mtx))
		return;

	/*
	 * No?  bummer, do it the slow and painful way then.
	 */
	upm->upm_wanted++;
	while (!mutex_tryenter(mtx)) {
		rump_schedlock_cv_wait(upm->upm_rucv);
	}
	upm->upm_wanted--;

	KASSERT(upm->upm_wanted >= 0);
}

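/*
 * On a virtual uniprocessor there is nothing to spin against, so
 * spin mutexes are simply aliases for the regular adaptive ones.
 */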
void
mutex_spin_enter(kmutex_t *mtx)
{

	mutex_enter(mtx);
}

int
mutex_tryenter(kmutex_t *mtx)
{
	UPMTX(mtx);

	if (upm->upm_owner)
		return 0;

	upm->upm_owner = curlwp;
	return 1;
}

void
mutex_exit(kmutex_t *mtx)
{
	UPMTX(mtx);

	if (upm->upm_wanted) {
		rumpuser_cv_signal(upm->upm_rucv); /* CPU is our interlock */
	}
	upm->upm_owner = NULL;
}

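/* see mutex_spin_enter() above: spin and adaptive exit are the same here */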
void
mutex_spin_exit(kmutex_t *mtx)
{

	mutex_exit(mtx);
}

int
mutex_owned(kmutex_t *mtx)
{
	UPMTX(mtx);

	return upm->upm_owner == curlwp;
}

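/*
 * Reader/writer locks use the same trick as mutexes: the krwlock_t
 * merely stores a pointer to a host-allocated struct uprw.  Readers
 * are only counted, so individual reader ownership is not tracked;
 * a writer is recorded in uprw_owner.
 */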
struct uprw {
	struct lwp *uprw_owner;
	int uprw_readers;
	uint16_t uprw_rwant;
	uint16_t uprw_wwant;
	struct rumpuser_cv *uprw_rucv_reader;
	struct rumpuser_cv *uprw_rucv_writer;
};

#define UPRW(rw) struct uprw *uprw = *(struct uprw **)rw

/* reader/writer locks */

void
rw_init(krwlock_t *rw)
{
	struct uprw *uprw;

	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));
	checkncpu();

	uprw = rumpuser_malloc(sizeof(*uprw), 0);
	memset(uprw, 0, sizeof(*uprw));
	rumpuser_cv_init(&uprw->uprw_rucv_reader);
	rumpuser_cv_init(&uprw->uprw_rucv_writer);
	memcpy(rw, &uprw, sizeof(void *));
}

void
rw_destroy(krwlock_t *rw)
{
	UPRW(rw);

	rumpuser_cv_destroy(uprw->uprw_rucv_reader);
	rumpuser_cv_destroy(uprw->uprw_rucv_writer);
	rumpuser_free(uprw);
}

/* take rwlock.  prefer writers over readers (see rw_tryenter and rw_exit) */
void
rw_enter(krwlock_t *rw, const krw_t op)
{
	UPRW(rw);
	struct rumpuser_cv *rucv;
	uint16_t *wp;

	if (rw_tryenter(rw, op))
		return;

	/* lagpath */
	if (op == RW_READER) {
		rucv = uprw->uprw_rucv_reader;
		wp = &uprw->uprw_rwant;
	} else {
		rucv = uprw->uprw_rucv_writer;
		wp = &uprw->uprw_wwant;
	}

	(*wp)++;
	while (!rw_tryenter(rw, op)) {
		rump_schedlock_cv_wait(rucv);
	}
	(*wp)--;
}

int
rw_tryenter(krwlock_t *rw, const krw_t op)
{
	UPRW(rw);

	switch (op) {
	case RW_READER:
		if (uprw->uprw_owner == NULL && uprw->uprw_wwant == 0) {
			uprw->uprw_readers++;
			return 1;
		}
		break;
	case RW_WRITER:
		if (uprw->uprw_owner == NULL && uprw->uprw_readers == 0) {
			uprw->uprw_owner = curlwp;
			return 1;
		}
		break;
	}

	return 0;
}

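/*
 * Release the lock.  Waiting writers are woken before waiting readers,
 * implementing the writer preference noted above rw_enter().
 */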
void
rw_exit(krwlock_t *rw)
{
	UPRW(rw);

	if (uprw->uprw_readers > 0) {
		uprw->uprw_readers--;
	} else {
		KASSERT(uprw->uprw_owner == curlwp);
		uprw->uprw_owner = NULL;
	}

	if (uprw->uprw_wwant) {
		rumpuser_cv_signal(uprw->uprw_rucv_writer);
	} else if (uprw->uprw_rwant) {
		rumpuser_cv_signal(uprw->uprw_rucv_reader);
	}
}

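/*
 * Upgrade a read hold to a write hold.  This can succeed only if we
 * are the sole reader; since reader identity is not recorded, the
 * caller is assumed to be that reader.
 */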
int
rw_tryupgrade(krwlock_t *rw)
{
	UPRW(rw);

	if (uprw->uprw_readers == 1 && uprw->uprw_owner == NULL) {
		uprw->uprw_readers = 0;
		uprw->uprw_owner = curlwp;
		return 1;
	} else {
		return 0;
	}
}

int
rw_write_held(krwlock_t *rw)
{
	UPRW(rw);

	return uprw->uprw_owner == curlwp;
}

int
rw_read_held(krwlock_t *rw)
{
	UPRW(rw);

	return uprw->uprw_readers > 0;
}

int
rw_lock_held(krwlock_t *rw)
{
	UPRW(rw);

	return uprw->uprw_owner || uprw->uprw_readers;
}


/*
 * Condvars are almost the same as in the MP case except that we
 * use the scheduler mutex as the pthread interlock instead of the
 * mutex associated with the condvar.
 */

#define RUMPCV(cv) (*(struct rumpuser_cv **)(cv))

void
cv_init(kcondvar_t *cv, const char *msg)
{

	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));
	checkncpu();

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}

void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}

void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{
#ifdef DIAGNOSTIC
	UPMTX(mtx);
	KASSERT(upm->upm_owner == curlwp);

	if (rump_threads == 0)
		panic("cv_wait without threads");
#endif

	/*
	 * NOTE: we must atomically release the *CPU* here, i.e.
	 * nothing between mutex_exit and entering rumpuser condwait
	 * may preempt us from the virtual CPU.
	 */
	mutex_exit(mtx);
	rump_schedlock_cv_wait(RUMPCV(cv));
	mutex_enter(mtx);
}

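/*
 * Signals are not delivered to threads waiting inside a rump kernel,
 * so the _sig variants simply wait and report success.
 */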
int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	cv_wait(cv, mtx);
	return 0;
}

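/*
 * Wait with a timeout given in ticks; ticks == 0 means no timeout.
 * The deadline is converted to a timespec via hz.  Returns EWOULDBLOCK
 * if the timeout expired before the condvar was signalled.
 */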
int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts, tstick;
	int rv;

#ifdef DIAGNOSTIC
	UPMTX(mtx);
	KASSERT(upm->upm_owner == curlwp);
#endif

	if (ticks == 0) {
		cv_wait(cv, mtx);
		return 0;
	}

	/*
	 * XXX: this fetches rump kernel time, but rumpuser_cv_timedwait
	 * uses host time.
	 */
	nanotime(&ts);
	tstick.tv_sec = ticks / hz;
	tstick.tv_nsec = (ticks % hz) * (1000000000/hz);
	timespecadd(&ts, &tstick, &ts);

	mutex_exit(mtx);
	rv = rump_schedlock_cv_timedwait(RUMPCV(cv), &ts);
	mutex_enter(mtx);
	if (rv)
		return EWOULDBLOCK;
	else
		return 0;
}

int
cv_timedwait_sig(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{

	return cv_timedwait(cv, mtx, ticks);
}

void
cv_signal(kcondvar_t *cv)
{

	/* CPU == interlock */
	rumpuser_cv_signal(RUMPCV(cv));
}

void
cv_broadcast(kcondvar_t *cv)
{

	/* CPU == interlock */
	rumpuser_cv_broadcast(RUMPCV(cv));
}

bool
cv_has_waiters(kcondvar_t *cv)
{

	return rumpuser_cv_has_waiters(RUMPCV(cv));
}

/* this is not much of an attempt, but ... */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}