/*	$NetBSD: klock.c,v 1.2.4.2 2010/08/11 22:55:06 yamt Exp $	*/

/*
 * Copyright (c) 2007-2010 Antti Kantee.  All Rights Reserved.
 *
 * Development of this software was supported by the
 * Finnish Cultural Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: klock.c,v 1.2.4.2 2010/08/11 22:55:06 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

/*
 * giant lock
 */

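/*
 * lockcnt tracks the biglock recursion depth: every acquisition in
 * _kernel_lock() increments it and every release in _kernel_unlock()
 * decrements it.  It is modified only while the biglock is held, so
 * no further synchronization is required.
 */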
static volatile int lockcnt;

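/*
 * Return true if the biglock is held by the current context.
 */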
bool
rump_kernel_isbiglocked(void)
{

	return rumpuser_mutex_held(rump_giantlock) && lockcnt > 0;
}

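/*
 * Release all biglock recursions except one and store the number of
 * dropped recursions in *countp.  This is used when the biglock
 * doubles as the cv interlock in tsleep(), where the underlying
 * mutex must be held exactly once.
 */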
void
rump_kernel_unlock_allbutone(int *countp)
{
	int minusone = lockcnt - 1;

	KASSERT(rump_kernel_isbiglocked());
	if (minusone) {
		_kernel_unlock(minusone, countp);
	}
	KASSERT(lockcnt == 1);
	*countp = minusone;

	/*
	 * We drop lockcnt to 0 since rumpuser doesn't know that the
	 * kernel biglock is being used as the cv interlock in tsleep.
	 */
	lockcnt = 0;
}

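/*
 * Reverse of rump_kernel_unlock_allbutone(): restore the bookkeeping
 * for the single recursion that was kept and reacquire the ones that
 * were dropped.
 */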
void
rump_kernel_ununlock_allbutone(int nlocks)
{

	KASSERT(rumpuser_mutex_held(rump_giantlock) && lockcnt == 0);
	lockcnt = 1;
	_kernel_lock(nlocks);
}

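/*
 * Acquire the biglock nlocks times.  Fast path: try to take the
 * mutex without blocking.  If that fails, release the virtual CPU
 * for the duration of the sleep so that other threads can run, and
 * reschedule once the mutex has been obtained.
 */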
void
_kernel_lock(int nlocks)
{

	while (nlocks--) {
		if (!rumpuser_mutex_tryenter(rump_giantlock)) {
			struct lwp *l = curlwp;

			rump_unschedule_cpu1(l, NULL);
			rumpuser_mutex_enter_nowrap(rump_giantlock);
			rump_schedule_cpu(l);
		}
		lockcnt++;
	}
}

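/*
 * Release the biglock nlocks times, storing the recursion count
 * prior to release in *countp (if non-NULL).  As special cases,
 * nlocks == 0 releases all recursions and nlocks == -1 releases a
 * single recursion which must be the only one.  Calling this
 * without holding the biglock is a no-op and is valid only with
 * nlocks == 0.
 */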
void
_kernel_unlock(int nlocks, int *countp)
{

	if (!rumpuser_mutex_held(rump_giantlock)) {
		KASSERT(nlocks == 0);
		if (countp)
			*countp = 0;
		return;
	}

	if (countp)
		*countp = lockcnt;
	if (nlocks == 0)
		nlocks = lockcnt;
	if (nlocks == -1) {
		KASSERT(lockcnt == 1);
		nlocks = 1;
	}
	KASSERT(nlocks <= lockcnt);
	while (nlocks--) {
		lockcnt--;
		rumpuser_mutex_exit(rump_giantlock);
	}
}

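/*
 * Give up the biglock and the virtual CPU, e.g. before blocking
 * outside the rump kernel.  The interlock is passed down so the CPU
 * can be released atomically with respect to it.
 */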
void
rump_user_unschedule(int nlocks, int *countp, void *interlock)
{

	_kernel_unlock(nlocks, countp);
	/*
	 * XXX: technically we should unschedule_cpu1() here, but that
	 * requires rump_intr_enter/exit to be implemented.
	 */
	rump_unschedule_cpu_interlock(curlwp, interlock);
}

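/*
 * Counterpart of rump_user_unschedule(): reacquire the virtual CPU
 * and then retake the dropped biglock recursions.
 */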
void
rump_user_schedule(int nlocks, void *interlock)
{

	rump_schedule_cpu_interlock(curlwp, interlock);

	if (nlocks)
		_kernel_lock(nlocks);
}