/*	$NetBSD: sleepq.c,v 1.23 2022/06/30 07:47:07 knakahara Exp $	*/
2
3 /*
4 * Copyright (c) 2008 Antti Kantee. All Rights Reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
16 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: sleepq.c,v 1.23 2022/06/30 07:47:07 knakahara Exp $");
30
31 #include <sys/param.h>
32 #include <sys/condvar.h>
33 #include <sys/mutex.h>
34 #include <sys/once.h>
35 #include <sys/queue.h>
36 #include <sys/sleepq.h>
37 #include <sys/syncobj.h>
38 #include <sys/atomic.h>
39
40 #include <rump-sys/kern.h>
41
/*
 * Sync object associated with sleeping LWPs.  It is not referenced
 * elsewhere in this file; presumably exported for core-kernel code
 * that expects it to exist -- TODO confirm against callers.
 */
syncobj_t sleep_syncobj;
43
/*
 * Initialize a sleep queue: an empty LWP list plus the condition
 * variable used to block and wake sleepers.
 */
void
sleepq_init(sleepq_t *sq)
{

	LIST_INIT(sq);
	cv_init(&sq->sq_cv, "sleepq");
}
51
/*
 * Tear down a sleep queue.  Only the condvar needs explicit
 * destruction; the list head itself holds no resources.
 */
void
sleepq_destroy(sleepq_t *sq)
{

	cv_destroy(&sq->sq_cv);
}
58
/*
 * Record that curlwp is about to sleep on wait channel wc and link it
 * onto sleep queue sq.  The syncobj (sob) and catch_p arguments are
 * accepted for interface compatibility but are ignored by this
 * implementation.  Caller is expected to hold the sleep queue lock.
 */
void
sleepq_enqueue(sleepq_t *sq, wchan_t wc, const char *wmsg, syncobj_t *sob,
	bool catch_p)
{
	struct lwp *l = curlwp;

	l->l_wchan = wc;
	l->l_wmesg = wmsg;
	l->l_sleepq = sq;	/* remember queue so sleepq_block() can find its cv */
	LIST_INSERT_HEAD(sq, l, l_sleepchain);
}
70
/*
 * Block curlwp until its wait channel (l_wchan) is cleared, either by
 * a wakeup (sleepq_wake()/sleepq_unsleep()) or locally on timeout or
 * interrupt.  Entered with the sleep queue lock (l->l_mutex) held;
 * that lock is released before returning.  Returns 0 on a normal
 * wakeup, or the error from cv_timedwait() (e.g. EWOULDBLOCK on
 * timeout, EINTR on interrupt).
 */
int
sleepq_block(int timo, bool catch, struct syncobj *syncobj __unused)
{
	struct lwp *l = curlwp;
	int error = 0;
	kmutex_t *mp = l->l_mutex;
	int biglocks = l->l_biglocks;	/* biglock depth to restore on wakeup */

	while (l->l_wchan) {
		l->l_mutex = mp; /* keep sleepq lock until woken up */
		error = cv_timedwait(&l->l_sleepq->sq_cv, mp, timo);
		if (error == EWOULDBLOCK || error == EINTR) {
			/*
			 * Timed out or interrupted.  If nobody already
			 * woke us (l_wchan still set), take ourselves
			 * off the queue so the loop terminates and the
			 * error is reported to the caller.
			 */
			if (l->l_wchan) {
				LIST_REMOVE(l, l_sleepchain);
				l->l_wchan = NULL;
				l->l_wmesg = NULL;
			}
		}
	}
	mutex_spin_exit(mp);

	/* Reacquire the biglock at the depth held before sleeping. */
	if (biglocks)
		KERNEL_LOCK(biglocks, curlwp);

	return error;
}
97
98 void
99 sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp)
100 {
101 struct lwp *l, *l_next;
102 bool found = false;
103
104 for (l = LIST_FIRST(sq); l; l = l_next) {
105 l_next = LIST_NEXT(l, l_sleepchain);
106 if (l->l_wchan == wchan) {
107 found = true;
108 l->l_wchan = NULL;
109 l->l_wmesg = NULL;
110 LIST_REMOVE(l, l_sleepchain);
111 if (--expected == 0)
112 break;
113 }
114 }
115 if (found)
116 cv_broadcast(&sq->sq_cv);
117
118 mutex_spin_exit(mp);
119 }
120
/*
 * Forcibly remove LWP l from the sleep queue it sleeps on and wake
 * the queue so l's sleepq_block() loop observes the cleared wait
 * channel.  If cleanup is true, also drop l's sleep queue lock.
 */
void
sleepq_unsleep(struct lwp *l, bool cleanup)
{

	l->l_wchan = NULL;
	l->l_wmesg = NULL;
	LIST_REMOVE(l, l_sleepchain);
	cv_broadcast(&l->l_sleepq->sq_cv);

	if (cleanup) {
		mutex_spin_exit(l->l_mutex);
	}
}
134
/*
 * Thread scheduler handles priorities.  Therefore no action here.
 * (maybe do something if we're desperate?)
 */
/* Priority change request: intentionally a no-op here. */
void
sleepq_changepri(struct lwp *l, pri_t pri)
{

}
144
/* Priority lending request: intentionally a no-op here. */
void
sleepq_lendpri(struct lwp *l, pri_t pri)
{

}
150
/*
 * Owner lookup for sync objects that have no owning LWP: always NULL.
 */
struct lwp *
syncobj_noowner(wchan_t wc)
{

	return NULL;
}
157
/*
 * Unlock LWP l, handing its lock pointer over to "new" before the
 * currently held lock is released.  The release-ordered store makes
 * the updated l_mutex visible to other CPUs no later than the unlock
 * of the old mutex.  Caller must hold l->l_mutex on entry; on return
 * the old lock is released and l is covered by "new".
 */
void
lwp_unlock_to(struct lwp *l, kmutex_t *new)
{
	kmutex_t *old;

	KASSERT(mutex_owned(l->l_mutex));

	old = l->l_mutex;
	/* Publish the new lock pointer before dropping the old lock. */
	atomic_store_release(&l->l_mutex, new);
	mutex_spin_exit(old);
}
169