1 1.48 mrg /* $NetBSD: subr_workqueue.c,v 1.48 2024/03/01 04:32:38 mrg Exp $ */ 2 1.1 yamt 3 1.1 yamt /*- 4 1.20 yamt * Copyright (c)2002, 2005, 2006, 2007 YAMAMOTO Takashi, 5 1.1 yamt * All rights reserved. 6 1.1 yamt * 7 1.1 yamt * Redistribution and use in source and binary forms, with or without 8 1.1 yamt * modification, are permitted provided that the following conditions 9 1.1 yamt * are met: 10 1.1 yamt * 1. Redistributions of source code must retain the above copyright 11 1.1 yamt * notice, this list of conditions and the following disclaimer. 12 1.1 yamt * 2. Redistributions in binary form must reproduce the above copyright 13 1.1 yamt * notice, this list of conditions and the following disclaimer in the 14 1.1 yamt * documentation and/or other materials provided with the distribution. 15 1.1 yamt * 16 1.1 yamt * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 1.1 yamt * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 1.1 yamt * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 1.1 yamt * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 1.1 yamt * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 1.1 yamt * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 1.1 yamt * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 1.1 yamt * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 1.1 yamt * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 1.1 yamt * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 1.1 yamt * SUCH DAMAGE. 27 1.1 yamt */ 28 1.1 yamt 29 1.1 yamt #include <sys/cdefs.h> 30 1.48 mrg __KERNEL_RCSID(0, "$NetBSD: subr_workqueue.c,v 1.48 2024/03/01 04:32:38 mrg Exp $"); 31 1.1 yamt 32 1.1 yamt #include <sys/param.h> 33 1.46 riastrad 34 1.46 riastrad #include <sys/condvar.h> 35 1.18 rmind #include <sys/cpu.h> 36 1.46 riastrad #include <sys/kmem.h> 37 1.1 yamt #include <sys/kthread.h> 38 1.46 riastrad #include <sys/mutex.h> 39 1.1 yamt #include <sys/proc.h> 40 1.46 riastrad #include <sys/queue.h> 41 1.46 riastrad #include <sys/sdt.h> 42 1.46 riastrad #include <sys/systm.h> 43 1.1 yamt #include <sys/workqueue.h> 44 1.1 yamt 45 1.17 yamt typedef struct work_impl { 46 1.17 yamt SIMPLEQ_ENTRY(work_impl) wk_entry; 47 1.17 yamt } work_impl_t; 48 1.17 yamt 49 1.17 yamt SIMPLEQ_HEAD(workqhead, work_impl); 50 1.1 yamt 51 1.1 yamt struct workqueue_queue { 52 1.9 ad kmutex_t q_mutex; 53 1.9 ad kcondvar_t q_cv; 54 1.34 ozaki struct workqhead q_queue_pending; 55 1.42 riastrad uint64_t q_gen; 56 1.28 yamt lwp_t *q_worker; 57 1.1 yamt }; 58 1.1 yamt 59 1.1 yamt struct workqueue { 60 1.1 yamt void (*wq_func)(struct work *, void *); 61 1.1 yamt void *wq_arg; 62 1.20 yamt int wq_flags; 63 1.20 yamt 64 1.32 jym char wq_name[MAXCOMLEN]; 65 1.12 yamt pri_t wq_prio; 66 1.18 rmind void *wq_ptr; 67 1.1 yamt }; 68 1.1 yamt 69 1.24 ad #define WQ_SIZE (roundup2(sizeof(struct workqueue), coherency_unit)) 70 1.24 ad #define WQ_QUEUE_SIZE (roundup2(sizeof(struct workqueue_queue), coherency_unit)) 71 1.18 rmind 72 1.1 yamt #define POISON 0xaabbccdd 73 1.1 yamt 74 1.41 riastrad SDT_PROBE_DEFINE7(sdt, kernel, workqueue, create, 75 1.41 riastrad "struct workqueue *"/*wq*/, 76 1.41 riastrad "const char *"/*name*/, 77 1.41 riastrad "void (*)(struct work *, void *)"/*func*/, 78 1.41 riastrad "void *"/*arg*/, 79 1.41 riastrad "pri_t"/*prio*/, 80 1.41 riastrad "int"/*ipl*/, 81 1.41 riastrad "int"/*flags*/); 82 1.41 riastrad SDT_PROBE_DEFINE1(sdt, kernel, workqueue, destroy, 83 1.41 riastrad "struct workqueue *"/*wq*/); 84 1.41 riastrad 85 1.41 riastrad SDT_PROBE_DEFINE3(sdt, kernel, workqueue, enqueue, 86 1.41 riastrad "struct workqueue *"/*wq*/, 87 1.41 riastrad "struct work *"/*wk*/, 88 1.41 riastrad "struct cpu_info *"/*ci*/); 89 1.41 riastrad SDT_PROBE_DEFINE4(sdt, kernel, workqueue, entry, 90 1.41 riastrad "struct workqueue *"/*wq*/, 91 1.41 riastrad "struct work *"/*wk*/, 92 1.41 riastrad "void (*)(struct work *, void *)"/*func*/, 93 1.41 riastrad "void *"/*arg*/); 94 1.41 riastrad SDT_PROBE_DEFINE4(sdt, kernel, workqueue, return, 95 1.41 riastrad "struct workqueue *"/*wq*/, 96 1.41 riastrad "struct work *"/*wk*/, 97 1.41 riastrad "void (*)(struct work *, void *)"/*func*/, 98 1.41 riastrad "void *"/*arg*/); 99 1.41 riastrad SDT_PROBE_DEFINE2(sdt, kernel, workqueue, wait__start, 100 1.41 riastrad "struct workqueue *"/*wq*/, 101 1.41 riastrad "struct work *"/*wk*/); 102 1.43 riastrad SDT_PROBE_DEFINE2(sdt, kernel, workqueue, wait__self, 103 1.43 riastrad "struct workqueue *"/*wq*/, 104 1.43 riastrad "struct work *"/*wk*/); 105 1.43 riastrad SDT_PROBE_DEFINE2(sdt, kernel, workqueue, wait__hit, 106 1.43 riastrad "struct workqueue *"/*wq*/, 107 1.43 riastrad "struct work *"/*wk*/); 108 1.41 riastrad SDT_PROBE_DEFINE2(sdt, kernel, workqueue, wait__done, 109 1.41 riastrad "struct workqueue *"/*wq*/, 110 1.41 riastrad "struct work *"/*wk*/); 111 1.41 riastrad 112 1.41 riastrad SDT_PROBE_DEFINE1(sdt, kernel, workqueue, exit__start, 113 1.41 riastrad "struct workqueue *"/*wq*/); 114 1.41 riastrad SDT_PROBE_DEFINE1(sdt, kernel, workqueue, exit__done, 115 1.41 riastrad "struct workqueue *"/*wq*/); 116 1.41 riastrad 117 1.20 yamt static size_t 118 1.20 yamt workqueue_size(int flags) 119 1.20 yamt { 120 1.20 yamt 121 1.20 yamt return WQ_SIZE 122 1.20 yamt + ((flags & WQ_PERCPU) != 0 ? ncpu : 1) * WQ_QUEUE_SIZE 123 1.24 ad + coherency_unit; 124 1.20 yamt } 125 1.20 yamt 126 1.14 rmind static struct workqueue_queue * 127 1.14 rmind workqueue_queue_lookup(struct workqueue *wq, struct cpu_info *ci) 128 1.14 rmind { 129 1.18 rmind u_int idx = 0; 130 1.14 rmind 131 1.18 rmind if (wq->wq_flags & WQ_PERCPU) { 132 1.18 rmind idx = ci ? cpu_index(ci) : cpu_index(curcpu()); 133 1.18 rmind } 134 1.14 rmind 135 1.26 rmind return (void *)((uintptr_t)(wq) + WQ_SIZE + (idx * WQ_QUEUE_SIZE)); 136 1.14 rmind } 137 1.14 rmind 138 1.1 yamt static void 139 1.1 yamt workqueue_runlist(struct workqueue *wq, struct workqhead *list) 140 1.1 yamt { 141 1.17 yamt work_impl_t *wk; 142 1.17 yamt work_impl_t *next; 143 1.48 mrg struct lwp *l = curlwp; 144 1.48 mrg 145 1.48 mrg KASSERTMSG(l->l_nopreempt == 0, "lwp %p nopreempt %d", 146 1.48 mrg l, l->l_nopreempt); 147 1.1 yamt 148 1.1 yamt for (wk = SIMPLEQ_FIRST(list); wk != NULL; wk = next) { 149 1.1 yamt next = SIMPLEQ_NEXT(wk, wk_entry); 150 1.41 riastrad SDT_PROBE4(sdt, kernel, workqueue, entry, 151 1.41 riastrad wq, wk, wq->wq_func, wq->wq_arg); 152 1.17 yamt (*wq->wq_func)((void *)wk, wq->wq_arg); 153 1.41 riastrad SDT_PROBE4(sdt, kernel, workqueue, return, 154 1.41 riastrad wq, wk, wq->wq_func, wq->wq_arg); 155 1.48 mrg KASSERTMSG(l->l_nopreempt == 0, 156 1.48 mrg "lwp %p nopreempt %d func %p", 157 1.48 mrg l, l->l_nopreempt, wq->wq_func); 158 1.1 yamt } 159 1.1 yamt } 160 1.1 yamt 161 1.1 yamt static void 162 1.21 yamt workqueue_worker(void *cookie) 163 1.1 yamt { 164 1.21 yamt struct workqueue *wq = cookie; 165 1.14 rmind struct workqueue_queue *q; 166 1.47 riastrad int s, fpu = wq->wq_flags & WQ_FPU; 167 1.14 rmind 168 1.14 rmind /* find the workqueue of this kthread */ 169 1.14 rmind q = workqueue_queue_lookup(wq, curlwp->l_cpu); 170 1.14 rmind 171 1.47 riastrad if (fpu) 172 1.38 riastrad s = kthread_fpu_enter(); 173 1.45 riastrad mutex_enter(&q->q_mutex); 174 1.3 rpaulo for (;;) { 175 1.42 riastrad struct workqhead tmp; 176 1.42 riastrad 177 1.44 riastrad SIMPLEQ_INIT(&tmp); 178 1.1 yamt 179 1.34 ozaki while (SIMPLEQ_EMPTY(&q->q_queue_pending)) 180 1.9 ad cv_wait(&q->q_cv, &q->q_mutex); 181 1.44 riastrad SIMPLEQ_CONCAT(&tmp, &q->q_queue_pending); 182 1.34 ozaki SIMPLEQ_INIT(&q->q_queue_pending); 183 1.42 riastrad 184 1.42 riastrad /* 185 1.42 riastrad * Mark the queue as actively running a batch of work 186 1.42 riastrad * by setting the generation number odd. 187 1.42 riastrad */ 188 1.42 riastrad q->q_gen |= 1; 189 1.9 ad mutex_exit(&q->q_mutex); 190 1.1 yamt 191 1.42 riastrad workqueue_runlist(wq, &tmp); 192 1.34 ozaki 193 1.42 riastrad /* 194 1.42 riastrad * Notify workqueue_wait that we have completed a batch 195 1.42 riastrad * of work by incrementing the generation number. 196 1.42 riastrad */ 197 1.34 ozaki mutex_enter(&q->q_mutex); 198 1.42 riastrad KASSERTMSG(q->q_gen & 1, "q=%p gen=%"PRIu64, q, q->q_gen); 199 1.42 riastrad q->q_gen++; 200 1.39 riastrad cv_broadcast(&q->q_cv); 201 1.1 yamt } 202 1.45 riastrad mutex_exit(&q->q_mutex); 203 1.47 riastrad if (fpu) 204 1.38 riastrad kthread_fpu_exit(s); 205 1.1 yamt } 206 1.1 yamt 207 1.1 yamt static void 208 1.1 yamt workqueue_init(struct workqueue *wq, const char *name, 209 1.1 yamt void (*callback_func)(struct work *, void *), void *callback_arg, 210 1.12 yamt pri_t prio, int ipl) 211 1.1 yamt { 212 1.1 yamt 213 1.36 ozaki KASSERT(sizeof(wq->wq_name) > strlen(name)); 214 1.32 jym strncpy(wq->wq_name, name, sizeof(wq->wq_name)); 215 1.32 jym 216 1.1 yamt wq->wq_prio = prio; 217 1.1 yamt wq->wq_func = callback_func; 218 1.1 yamt wq->wq_arg = callback_arg; 219 1.1 yamt } 220 1.1 yamt 221 1.1 yamt static int 222 1.18 rmind workqueue_initqueue(struct workqueue *wq, struct workqueue_queue *q, 223 1.18 rmind int ipl, struct cpu_info *ci) 224 1.1 yamt { 225 1.13 ad int error, ktf; 226 1.14 rmind 227 1.20 yamt KASSERT(q->q_worker == NULL); 228 1.20 yamt 229 1.22 ad mutex_init(&q->q_mutex, MUTEX_DEFAULT, ipl); 230 1.9 ad cv_init(&q->q_cv, wq->wq_name); 231 1.34 ozaki SIMPLEQ_INIT(&q->q_queue_pending); 232 1.42 riastrad q->q_gen = 0; 233 1.18 rmind ktf = ((wq->wq_flags & WQ_MPSAFE) != 0 ? KTHREAD_MPSAFE : 0); 234 1.33 matt if (wq->wq_prio < PRI_KERNEL) 235 1.33 matt ktf |= KTHREAD_TS; 236 1.18 rmind if (ci) { 237 1.18 rmind error = kthread_create(wq->wq_prio, ktf, ci, workqueue_worker, 238 1.23 martin wq, &q->q_worker, "%s/%u", wq->wq_name, ci->ci_index); 239 1.18 rmind } else { 240 1.18 rmind error = kthread_create(wq->wq_prio, ktf, ci, workqueue_worker, 241 1.18 rmind wq, &q->q_worker, "%s", wq->wq_name); 242 1.18 rmind } 243 1.20 yamt if (error != 0) { 244 1.20 yamt mutex_destroy(&q->q_mutex); 245 1.20 yamt cv_destroy(&q->q_cv); 246 1.20 yamt KASSERT(q->q_worker == NULL); 247 1.20 yamt } 248 1.1 yamt return error; 249 1.1 yamt } 250 1.1 yamt 251 1.5 yamt struct workqueue_exitargs { 252 1.17 yamt work_impl_t wqe_wk; 253 1.5 yamt struct workqueue_queue *wqe_q; 254 1.5 yamt }; 255 1.5 yamt 256 1.5 yamt static void 257 1.7 yamt workqueue_exit(struct work *wk, void *arg) 258 1.5 yamt { 259 1.5 yamt struct workqueue_exitargs *wqe = (void *)wk; 260 1.5 yamt struct workqueue_queue *q = wqe->wqe_q; 261 1.5 yamt 262 1.5 yamt /* 263 1.11 yamt * only competition at this point is workqueue_finiqueue. 264 1.5 yamt */ 265 1.5 yamt 266 1.13 ad KASSERT(q->q_worker == curlwp); 267 1.34 ozaki KASSERT(SIMPLEQ_EMPTY(&q->q_queue_pending)); 268 1.9 ad mutex_enter(&q->q_mutex); 269 1.5 yamt q->q_worker = NULL; 270 1.39 riastrad cv_broadcast(&q->q_cv); 271 1.9 ad mutex_exit(&q->q_mutex); 272 1.5 yamt kthread_exit(0); 273 1.5 yamt } 274 1.5 yamt 275 1.5 yamt static void 276 1.14 rmind workqueue_finiqueue(struct workqueue *wq, struct workqueue_queue *q) 277 1.5 yamt { 278 1.5 yamt struct workqueue_exitargs wqe; 279 1.5 yamt 280 1.20 yamt KASSERT(wq->wq_func == workqueue_exit); 281 1.5 yamt 282 1.5 yamt wqe.wqe_q = q; 283 1.34 ozaki KASSERT(SIMPLEQ_EMPTY(&q->q_queue_pending)); 284 1.5 yamt KASSERT(q->q_worker != NULL); 285 1.9 ad mutex_enter(&q->q_mutex); 286 1.34 ozaki SIMPLEQ_INSERT_TAIL(&q->q_queue_pending, &wqe.wqe_wk, wk_entry); 287 1.39 riastrad cv_broadcast(&q->q_cv); 288 1.5 yamt while (q->q_worker != NULL) { 289 1.9 ad cv_wait(&q->q_cv, &q->q_mutex); 290 1.5 yamt } 291 1.9 ad mutex_exit(&q->q_mutex); 292 1.9 ad mutex_destroy(&q->q_mutex); 293 1.9 ad cv_destroy(&q->q_cv); 294 1.5 yamt } 295 1.5 yamt 296 1.1 yamt /* --- */ 297 1.1 yamt 298 1.1 yamt int 299 1.1 yamt workqueue_create(struct workqueue **wqp, const char *name, 300 1.1 yamt void (*callback_func)(struct work *, void *), void *callback_arg, 301 1.12 yamt pri_t prio, int ipl, int flags) 302 1.1 yamt { 303 1.1 yamt struct workqueue *wq; 304 1.18 rmind struct workqueue_queue *q; 305 1.18 rmind void *ptr; 306 1.20 yamt int error = 0; 307 1.1 yamt 308 1.25 matt CTASSERT(sizeof(work_impl_t) <= sizeof(struct work)); 309 1.17 yamt 310 1.20 yamt ptr = kmem_zalloc(workqueue_size(flags), KM_SLEEP); 311 1.26 rmind wq = (void *)roundup2((uintptr_t)ptr, coherency_unit); 312 1.18 rmind wq->wq_ptr = ptr; 313 1.18 rmind wq->wq_flags = flags; 314 1.1 yamt 315 1.1 yamt workqueue_init(wq, name, callback_func, callback_arg, prio, ipl); 316 1.1 yamt 317 1.14 rmind if (flags & WQ_PERCPU) { 318 1.14 rmind struct cpu_info *ci; 319 1.14 rmind CPU_INFO_ITERATOR cii; 320 1.14 rmind 321 1.14 rmind /* create the work-queue for each CPU */ 322 1.14 rmind for (CPU_INFO_FOREACH(cii, ci)) { 323 1.20 yamt q = workqueue_queue_lookup(wq, ci); 324 1.18 rmind error = workqueue_initqueue(wq, q, ipl, ci); 325 1.18 rmind if (error) { 326 1.14 rmind break; 327 1.18 rmind } 328 1.14 rmind } 329 1.14 rmind } else { 330 1.18 rmind /* initialize a work-queue */ 331 1.20 yamt q = workqueue_queue_lookup(wq, NULL); 332 1.18 rmind error = workqueue_initqueue(wq, q, ipl, NULL); 333 1.1 yamt } 334 1.18 rmind 335 1.20 yamt if (error != 0) { 336 1.20 yamt workqueue_destroy(wq); 337 1.20 yamt } else { 338 1.20 yamt *wqp = wq; 339 1.15 rmind } 340 1.1 yamt 341 1.20 yamt return error; 342 1.1 yamt } 343 1.1 yamt 344 1.34 ozaki static bool 345 1.43 riastrad workqueue_q_wait(struct workqueue *wq, struct workqueue_queue *q, 346 1.43 riastrad work_impl_t *wk_target) 347 1.34 ozaki { 348 1.34 ozaki work_impl_t *wk; 349 1.34 ozaki bool found = false; 350 1.42 riastrad uint64_t gen; 351 1.34 ozaki 352 1.34 ozaki mutex_enter(&q->q_mutex); 353 1.42 riastrad 354 1.42 riastrad /* 355 1.42 riastrad * Avoid a deadlock scenario. We can't guarantee that 356 1.42 riastrad * wk_target has completed at this point, but we can't wait for 357 1.42 riastrad * it either, so do nothing. 358 1.42 riastrad * 359 1.42 riastrad * XXX Are there use-cases that require this semantics? 360 1.42 riastrad */ 361 1.43 riastrad if (q->q_worker == curlwp) { 362 1.43 riastrad SDT_PROBE2(sdt, kernel, workqueue, wait__self, wq, wk_target); 363 1.37 ozaki goto out; 364 1.43 riastrad } 365 1.42 riastrad 366 1.42 riastrad /* 367 1.42 riastrad * Wait until the target is no longer pending. If we find it 368 1.42 riastrad * on this queue, the caller can stop looking in other queues. 369 1.42 riastrad * If we don't find it in this queue, however, we can't skip 370 1.42 riastrad * waiting -- it may be hidden in the running queue which we 371 1.42 riastrad * have no access to. 372 1.42 riastrad */ 373 1.34 ozaki again: 374 1.34 ozaki SIMPLEQ_FOREACH(wk, &q->q_queue_pending, wk_entry) { 375 1.42 riastrad if (wk == wk_target) { 376 1.43 riastrad SDT_PROBE2(sdt, kernel, workqueue, wait__hit, wq, wk); 377 1.42 riastrad found = true; 378 1.42 riastrad cv_wait(&q->q_cv, &q->q_mutex); 379 1.42 riastrad goto again; 380 1.42 riastrad } 381 1.34 ozaki } 382 1.42 riastrad 383 1.42 riastrad /* 384 1.42 riastrad * The target may be in the batch of work currently running, 385 1.42 riastrad * but we can't touch that queue. So if there's anything 386 1.42 riastrad * running, wait until the generation changes. 387 1.42 riastrad */ 388 1.42 riastrad gen = q->q_gen; 389 1.42 riastrad if (gen & 1) { 390 1.42 riastrad do 391 1.42 riastrad cv_wait(&q->q_cv, &q->q_mutex); 392 1.42 riastrad while (gen == q->q_gen); 393 1.34 ozaki } 394 1.42 riastrad 395 1.37 ozaki out: 396 1.34 ozaki mutex_exit(&q->q_mutex); 397 1.34 ozaki 398 1.34 ozaki return found; 399 1.34 ozaki } 400 1.34 ozaki 401 1.34 ozaki /* 402 1.34 ozaki * Wait for a specified work to finish. The caller must ensure that no new 403 1.34 ozaki * work will be enqueued before calling workqueue_wait. Note that if the 404 1.34 ozaki * workqueue is WQ_PERCPU, the caller can enqueue a new work to another queue 405 1.34 ozaki * other than the waiting queue. 406 1.34 ozaki */ 407 1.34 ozaki void 408 1.34 ozaki workqueue_wait(struct workqueue *wq, struct work *wk) 409 1.34 ozaki { 410 1.34 ozaki struct workqueue_queue *q; 411 1.34 ozaki bool found; 412 1.34 ozaki 413 1.40 riastrad ASSERT_SLEEPABLE(); 414 1.40 riastrad 415 1.41 riastrad SDT_PROBE2(sdt, kernel, workqueue, wait__start, wq, wk); 416 1.34 ozaki if (ISSET(wq->wq_flags, WQ_PERCPU)) { 417 1.34 ozaki struct cpu_info *ci; 418 1.34 ozaki CPU_INFO_ITERATOR cii; 419 1.34 ozaki for (CPU_INFO_FOREACH(cii, ci)) { 420 1.34 ozaki q = workqueue_queue_lookup(wq, ci); 421 1.43 riastrad found = workqueue_q_wait(wq, q, (work_impl_t *)wk); 422 1.34 ozaki if (found) 423 1.34 ozaki break; 424 1.34 ozaki } 425 1.34 ozaki } else { 426 1.34 ozaki q = workqueue_queue_lookup(wq, NULL); 427 1.43 riastrad (void)workqueue_q_wait(wq, q, (work_impl_t *)wk); 428 1.34 ozaki } 429 1.41 riastrad SDT_PROBE2(sdt, kernel, workqueue, wait__done, wq, wk); 430 1.34 ozaki } 431 1.34 ozaki 432 1.1 yamt void 433 1.5 yamt workqueue_destroy(struct workqueue *wq) 434 1.5 yamt { 435 1.14 rmind struct workqueue_queue *q; 436 1.20 yamt struct cpu_info *ci; 437 1.20 yamt CPU_INFO_ITERATOR cii; 438 1.5 yamt 439 1.40 riastrad ASSERT_SLEEPABLE(); 440 1.40 riastrad 441 1.41 riastrad SDT_PROBE1(sdt, kernel, workqueue, exit__start, wq); 442 1.20 yamt wq->wq_func = workqueue_exit; 443 1.20 yamt for (CPU_INFO_FOREACH(cii, ci)) { 444 1.20 yamt q = workqueue_queue_lookup(wq, ci); 445 1.20 yamt if (q->q_worker != NULL) { 446 1.18 rmind workqueue_finiqueue(wq, q); 447 1.18 rmind } 448 1.14 rmind } 449 1.41 riastrad SDT_PROBE1(sdt, kernel, workqueue, exit__done, wq); 450 1.20 yamt kmem_free(wq->wq_ptr, workqueue_size(wq->wq_flags)); 451 1.5 yamt } 452 1.5 yamt 453 1.35 ozaki #ifdef DEBUG 454 1.35 ozaki static void 455 1.35 ozaki workqueue_check_duplication(struct workqueue_queue *q, work_impl_t *wk) 456 1.35 ozaki { 457 1.35 ozaki work_impl_t *_wk; 458 1.35 ozaki 459 1.35 ozaki SIMPLEQ_FOREACH(_wk, &q->q_queue_pending, wk_entry) { 460 1.35 ozaki if (_wk == wk) 461 1.35 ozaki panic("%s: tried to enqueue a queued work", __func__); 462 1.35 ozaki } 463 1.35 ozaki } 464 1.35 ozaki #endif 465 1.35 ozaki 466 1.5 yamt void 467 1.17 yamt workqueue_enqueue(struct workqueue *wq, struct work *wk0, struct cpu_info *ci) 468 1.1 yamt { 469 1.14 rmind struct workqueue_queue *q; 470 1.17 yamt work_impl_t *wk = (void *)wk0; 471 1.14 rmind 472 1.41 riastrad SDT_PROBE3(sdt, kernel, workqueue, enqueue, wq, wk0, ci); 473 1.41 riastrad 474 1.18 rmind KASSERT(wq->wq_flags & WQ_PERCPU || ci == NULL); 475 1.14 rmind q = workqueue_queue_lookup(wq, ci); 476 1.1 yamt 477 1.9 ad mutex_enter(&q->q_mutex); 478 1.35 ozaki #ifdef DEBUG 479 1.35 ozaki workqueue_check_duplication(q, wk); 480 1.35 ozaki #endif 481 1.34 ozaki SIMPLEQ_INSERT_TAIL(&q->q_queue_pending, wk, wk_entry); 482 1.39 riastrad cv_broadcast(&q->q_cv); 483 1.9 ad mutex_exit(&q->q_mutex); 484 1.1 yamt } 485