/* $NetBSD: kern_threadpool.c,v 1.12 2018/12/27 04:45:29 thorpej Exp $ */

/*-
 * Copyright (c) 2014, 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell and Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Thread pools.
 *
 * A thread pool is a collection of worker threads idle or running
 * jobs, together with an overseer thread that does not run jobs but
 * can be given jobs to assign to a worker thread.  Scheduling a job in
 * a thread pool does not allocate or even sleep at all, except perhaps
 * on an adaptive lock, unlike kthread_create.  Jobs reuse threads, so
 * they do not incur the expense of creating and destroying kthreads
 * unless there is not much work to be done.
 *
 * A per-CPU thread pool (threadpool_percpu) is a collection of thread
 * pools, one per CPU bound to that CPU.  For each priority level in
 * use, there is one shared unbound thread pool (i.e., pool of threads
 * not bound to any CPU) and one shared per-CPU thread pool.
 *
 * To use the unbound thread pool at priority pri, call
 * threadpool_get(&pool, pri).  When you're done, call
 * threadpool_put(pool, pri).
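 *
 * For example, a minimal sketch of running deferred work on the
 * unbound pool (my_pool, my_job, my_job_lock, and my_work are
 * hypothetical names, and error handling is elided):
 *
 *	static struct threadpool *my_pool;
 *	static struct threadpool_job my_job;
 *	static kmutex_t my_job_lock;
 *
 *	static void
 *	my_work(struct threadpool_job *job)
 *	{
 *		... do the deferred work ...
 *		mutex_enter(&my_job_lock);
 *		threadpool_job_done(job);
 *		mutex_exit(&my_job_lock);
 *	}
 *
 *	error = threadpool_get(&my_pool, PRI_NONE);
 *	mutex_init(&my_job_lock, MUTEX_DEFAULT, IPL_NONE);
 *	threadpool_job_init(&my_job, my_work, &my_job_lock, "mywork");
 *
 *	mutex_enter(&my_job_lock);
 *	threadpool_schedule_job(my_pool, &my_job);
 *	mutex_exit(&my_job_lock);
 *
 * To tear down, cancel the job with my_job_lock held via
 * threadpool_cancel_job(my_pool, &my_job), drop the lock, then call
 * threadpool_job_destroy(&my_job) and threadpool_put(my_pool,
 * PRI_NONE).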
 *
 * To use the per-CPU thread pools at priority pri, call
 * threadpool_percpu_get(&pool_percpu, pri), and then use the thread
 * pool returned by threadpool_percpu_ref(pool_percpu) for the current
 * CPU, or by threadpool_percpu_ref_remote(pool_percpu, ci) for another
 * CPU.  When you're done, call threadpool_percpu_put(pool_percpu,
 * pri).
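 *
 * For example, again as a minimal sketch reusing the hypothetical job
 * from above (already set up with threadpool_job_init):
 *
 *	static struct threadpool_percpu *my_pool_percpu;
 *
 *	error = threadpool_percpu_get(&my_pool_percpu, PRI_NONE);
 *
 *	mutex_enter(&my_job_lock);
 *	threadpool_schedule_job(threadpool_percpu_ref(my_pool_percpu),
 *	    &my_job);
 *	mutex_exit(&my_job_lock);
 *
 *	...
 *	threadpool_percpu_put(my_pool_percpu, PRI_NONE);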
 *
 *    +--MACHINE-----------------------------------------------+
 *    | +--CPU 0-------+ +--CPU 1-------+     +--CPU n-------+ |
 *    | | <overseer 0> | | <overseer 1> | ... | <overseer n> | |
 *    | | <idle 0a>    | | <running 1a> | ... | <idle na>    | |
 *    | | <running 0b> | | <running 1b> | ... | <idle nb>    | |
 *    | |      .       | |      .       | ... |      .       | |
 *    | |      .       | |      .       | ... |      .       | |
 *    | |      .       | |      .       | ... |      .       | |
 *    | +--------------+ +--------------+     +--------------+ |
 *    |            +--unbound---------+                        |
 *    |            | <overseer n+1>   |                        |
 *    |            | <idle (n+1)a>    |                        |
 *    |            | <running (n+1)b> |                        |
 *    |            +------------------+                        |
 *    +--------------------------------------------------------+
 *
 * XXX Why one overseer per CPU?  I did that originally to avoid
 * touching remote CPUs' memory when scheduling a job, but that still
 * requires interprocessor synchronization.  Perhaps we could get by
 * with a single overseer thread, at the expense of another pointer in
 * struct threadpool_job to identify the CPU on which it must run
 * in order for the overseer to schedule it correctly.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_threadpool.c,v 1.12 2018/12/27 04:45:29 thorpej Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/percpu.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/threadpool.h>

/* Data structures */

TAILQ_HEAD(job_head, threadpool_job);
TAILQ_HEAD(thread_head, threadpool_thread);

struct threadpool_thread {
        struct lwp *tpt_lwp;
        char *tpt_lwp_savedname;
        struct threadpool *tpt_pool;
        struct threadpool_job *tpt_job;
        kcondvar_t tpt_cv;
        TAILQ_ENTRY(threadpool_thread) tpt_entry;
};

struct threadpool {
        kmutex_t tp_lock;
        struct threadpool_thread tp_overseer;
        struct job_head tp_jobs;
        struct thread_head tp_idle_threads;
        uint64_t tp_refcnt;
        int tp_flags;
#define THREADPOOL_DYING        0x01
        struct cpu_info *tp_cpu;
        pri_t tp_pri;
};

static void     threadpool_hold(struct threadpool *);
static void     threadpool_rele(struct threadpool *);

static int      threadpool_percpu_create(struct threadpool_percpu **, pri_t);
static void     threadpool_percpu_destroy(struct threadpool_percpu *);

static threadpool_job_fn_t threadpool_job_dead;

static int      threadpool_job_hold(struct threadpool_job *);
static void     threadpool_job_rele(struct threadpool_job *);

static void     threadpool_overseer_thread(void *) __dead;
static void     threadpool_thread(void *) __dead;

static pool_cache_t threadpool_thread_pc __read_mostly;

static kmutex_t threadpools_lock __cacheline_aligned;

/* Idle out threads after 30 seconds */
#define THREADPOOL_IDLE_TICKS   mstohz(30 * 1000)

struct threadpool_unbound {
        struct threadpool tpu_pool;

        /* protected by threadpools_lock */
        LIST_ENTRY(threadpool_unbound) tpu_link;
        uint64_t tpu_refcnt;
};

static LIST_HEAD(, threadpool_unbound) unbound_threadpools;

static struct threadpool_unbound *
threadpool_lookup_unbound(pri_t pri)
{
        struct threadpool_unbound *tpu;

        LIST_FOREACH(tpu, &unbound_threadpools, tpu_link) {
                if (tpu->tpu_pool.tp_pri == pri)
                        return tpu;
        }
        return NULL;
}

static void
threadpool_insert_unbound(struct threadpool_unbound *tpu)
{
        KASSERT(threadpool_lookup_unbound(tpu->tpu_pool.tp_pri) == NULL);
        LIST_INSERT_HEAD(&unbound_threadpools, tpu, tpu_link);
}

static void
threadpool_remove_unbound(struct threadpool_unbound *tpu)
{
        KASSERT(threadpool_lookup_unbound(tpu->tpu_pool.tp_pri) == tpu);
        LIST_REMOVE(tpu, tpu_link);
}

struct threadpool_percpu {
        percpu_t *tpp_percpu;
        pri_t tpp_pri;

        /* protected by threadpools_lock */
        LIST_ENTRY(threadpool_percpu) tpp_link;
        uint64_t tpp_refcnt;
};

static LIST_HEAD(, threadpool_percpu) percpu_threadpools;

static struct threadpool_percpu *
threadpool_lookup_percpu(pri_t pri)
{
        struct threadpool_percpu *tpp;

        LIST_FOREACH(tpp, &percpu_threadpools, tpp_link) {
                if (tpp->tpp_pri == pri)
                        return tpp;
        }
        return NULL;
}

static void
threadpool_insert_percpu(struct threadpool_percpu *tpp)
{
        KASSERT(threadpool_lookup_percpu(tpp->tpp_pri) == NULL);
        LIST_INSERT_HEAD(&percpu_threadpools, tpp, tpp_link);
}

static void
threadpool_remove_percpu(struct threadpool_percpu *tpp)
{
        KASSERT(threadpool_lookup_percpu(tpp->tpp_pri) == tpp);
        LIST_REMOVE(tpp, tpp_link);
}

#ifdef THREADPOOL_VERBOSE
#define TP_LOG(x)       printf x
#else
#define TP_LOG(x)       /* nothing */
#endif /* THREADPOOL_VERBOSE */

void
threadpools_init(void)
{

        threadpool_thread_pc =
            pool_cache_init(sizeof(struct threadpool_thread), 0, 0, 0,
            "thplthrd", NULL, IPL_NONE, NULL, NULL, NULL);

        LIST_INIT(&unbound_threadpools);
        LIST_INIT(&percpu_threadpools);
        mutex_init(&threadpools_lock, MUTEX_DEFAULT, IPL_NONE);
}

/* Thread pool creation */

static bool
threadpool_pri_is_valid(pri_t pri)
{
        return (pri == PRI_NONE || (pri >= PRI_USER && pri < PRI_COUNT));
}

static int
threadpool_create(struct threadpool *const pool, struct cpu_info *ci,
    pri_t pri)
{
        struct lwp *lwp;
        int ktflags;
        int error;

        KASSERT(threadpool_pri_is_valid(pri));

        mutex_init(&pool->tp_lock, MUTEX_DEFAULT, IPL_VM);
        /* XXX overseer */
        TAILQ_INIT(&pool->tp_jobs);
        TAILQ_INIT(&pool->tp_idle_threads);
        pool->tp_refcnt = 1;            /* overseer's reference */
        pool->tp_flags = 0;
        pool->tp_cpu = ci;
        pool->tp_pri = pri;

        pool->tp_overseer.tpt_lwp = NULL;
        pool->tp_overseer.tpt_pool = pool;
        pool->tp_overseer.tpt_job = NULL;
        cv_init(&pool->tp_overseer.tpt_cv, "poolover");

        ktflags = 0;
        ktflags |= KTHREAD_MPSAFE;
        if (pri < PRI_KERNEL)
                ktflags |= KTHREAD_TS;
        error = kthread_create(pri, ktflags, ci, &threadpool_overseer_thread,
            &pool->tp_overseer, &lwp,
            "pooloverseer/%d@%d", (ci ? cpu_index(ci) : -1), (int)pri);
        if (error)
                goto fail0;

        mutex_spin_enter(&pool->tp_lock);
        pool->tp_overseer.tpt_lwp = lwp;
        cv_broadcast(&pool->tp_overseer.tpt_cv);
        mutex_spin_exit(&pool->tp_lock);

        return 0;

fail0:  KASSERT(error);
        KASSERT(pool->tp_overseer.tpt_job == NULL);
        KASSERT(pool->tp_overseer.tpt_pool == pool);
        KASSERT(pool->tp_flags == 0);
        KASSERT(pool->tp_refcnt == 0);
        KASSERT(TAILQ_EMPTY(&pool->tp_idle_threads));
        KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
        KASSERT(!cv_has_waiters(&pool->tp_overseer.tpt_cv));
        cv_destroy(&pool->tp_overseer.tpt_cv);
        mutex_destroy(&pool->tp_lock);
        return error;
}

/* Thread pool destruction */

static void
threadpool_destroy(struct threadpool *pool)
{
        struct threadpool_thread *thread;

        /* Mark the pool dying and wait for threads to commit suicide. */
        mutex_spin_enter(&pool->tp_lock);
        KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
        pool->tp_flags |= THREADPOOL_DYING;
        cv_broadcast(&pool->tp_overseer.tpt_cv);
        TAILQ_FOREACH(thread, &pool->tp_idle_threads, tpt_entry)
                cv_broadcast(&thread->tpt_cv);
        while (0 < pool->tp_refcnt) {
                TP_LOG(("%s: draining %u references...\n", __func__,
                    pool->tp_refcnt));
                cv_wait(&pool->tp_overseer.tpt_cv, &pool->tp_lock);
        }
        mutex_spin_exit(&pool->tp_lock);

        KASSERT(pool->tp_overseer.tpt_job == NULL);
        KASSERT(pool->tp_overseer.tpt_pool == pool);
        KASSERT(pool->tp_flags == THREADPOOL_DYING);
        KASSERT(pool->tp_refcnt == 0);
        KASSERT(TAILQ_EMPTY(&pool->tp_idle_threads));
        KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
        KASSERT(!cv_has_waiters(&pool->tp_overseer.tpt_cv));
        cv_destroy(&pool->tp_overseer.tpt_cv);
        mutex_destroy(&pool->tp_lock);
}

static void
threadpool_hold(struct threadpool *pool)
{

        KASSERT(mutex_owned(&pool->tp_lock));
        pool->tp_refcnt++;
        KASSERT(pool->tp_refcnt != 0);
}

static void
threadpool_rele(struct threadpool *pool)
{

        KASSERT(mutex_owned(&pool->tp_lock));
        KASSERT(0 < pool->tp_refcnt);
        if (--pool->tp_refcnt == 0)
                cv_broadcast(&pool->tp_overseer.tpt_cv);
}

/* Unbound thread pools */

int
threadpool_get(struct threadpool **poolp, pri_t pri)
{
        struct threadpool_unbound *tpu, *tmp = NULL;
        int error;

        ASSERT_SLEEPABLE();

        if (! threadpool_pri_is_valid(pri))
                return EINVAL;

        mutex_enter(&threadpools_lock);
        tpu = threadpool_lookup_unbound(pri);
        if (tpu == NULL) {
                mutex_exit(&threadpools_lock);
                TP_LOG(("%s: No pool for pri=%d, creating one.\n",
                    __func__, (int)pri));
                tmp = kmem_zalloc(sizeof(*tmp), KM_SLEEP);
                error = threadpool_create(&tmp->tpu_pool, NULL, pri);
                if (error) {
                        kmem_free(tmp, sizeof(*tmp));
                        return error;
                }
                mutex_enter(&threadpools_lock);
                tpu = threadpool_lookup_unbound(pri);
                if (tpu == NULL) {
                        TP_LOG(("%s: Won the creation race for pri=%d.\n",
                            __func__, (int)pri));
                        tpu = tmp;
                        tmp = NULL;
                        threadpool_insert_unbound(tpu);
                }
        }
        KASSERT(tpu != NULL);
        tpu->tpu_refcnt++;
        KASSERT(tpu->tpu_refcnt != 0);
        mutex_exit(&threadpools_lock);

        if (tmp != NULL) {
                threadpool_destroy(&tmp->tpu_pool);
                kmem_free(tmp, sizeof(*tmp));
        }
        KASSERT(tpu != NULL);
        *poolp = &tpu->tpu_pool;
        return 0;
}

void
threadpool_put(struct threadpool *pool, pri_t pri)
{
        struct threadpool_unbound *tpu =
            container_of(pool, struct threadpool_unbound, tpu_pool);

        ASSERT_SLEEPABLE();

        KASSERT(threadpool_pri_is_valid(pri));

        mutex_enter(&threadpools_lock);
        KASSERT(tpu == threadpool_lookup_unbound(pri));
        KASSERT(0 < tpu->tpu_refcnt);
        if (--tpu->tpu_refcnt == 0) {
                TP_LOG(("%s: Last reference for pri=%d, destroying pool.\n",
                    __func__, (int)pri));
                threadpool_remove_unbound(tpu);
        } else {
                tpu = NULL;
        }
        mutex_exit(&threadpools_lock);

        if (tpu) {
                threadpool_destroy(&tpu->tpu_pool);
                kmem_free(tpu, sizeof(*tpu));
        }
}

/* Per-CPU thread pools */

int
threadpool_percpu_get(struct threadpool_percpu **pool_percpup, pri_t pri)
{
        struct threadpool_percpu *pool_percpu, *tmp = NULL;
        int error;

        ASSERT_SLEEPABLE();

        if (! threadpool_pri_is_valid(pri))
                return EINVAL;

        mutex_enter(&threadpools_lock);
        pool_percpu = threadpool_lookup_percpu(pri);
        if (pool_percpu == NULL) {
                mutex_exit(&threadpools_lock);
                TP_LOG(("%s: No pool for pri=%d, creating one.\n",
                    __func__, (int)pri));
                error = threadpool_percpu_create(&tmp, pri);
                if (error)
                        return error;
                KASSERT(tmp != NULL);
                mutex_enter(&threadpools_lock);
                pool_percpu = threadpool_lookup_percpu(pri);
                if (pool_percpu == NULL) {
                        TP_LOG(("%s: Won the creation race for pri=%d.\n",
                            __func__, (int)pri));
                        pool_percpu = tmp;
                        tmp = NULL;
                        threadpool_insert_percpu(pool_percpu);
                }
        }
        KASSERT(pool_percpu != NULL);
        pool_percpu->tpp_refcnt++;
        KASSERT(pool_percpu->tpp_refcnt != 0);
        mutex_exit(&threadpools_lock);

        if (tmp != NULL)
                threadpool_percpu_destroy(tmp);
        KASSERT(pool_percpu != NULL);
        *pool_percpup = pool_percpu;
        return 0;
}

void
threadpool_percpu_put(struct threadpool_percpu *pool_percpu, pri_t pri)
{

        ASSERT_SLEEPABLE();

        KASSERT(threadpool_pri_is_valid(pri));

        mutex_enter(&threadpools_lock);
        KASSERT(pool_percpu == threadpool_lookup_percpu(pri));
        KASSERT(0 < pool_percpu->tpp_refcnt);
        if (--pool_percpu->tpp_refcnt == 0) {
                TP_LOG(("%s: Last reference for pri=%d, destroying pool.\n",
                    __func__, (int)pri));
                threadpool_remove_percpu(pool_percpu);
        } else {
                pool_percpu = NULL;
        }
        mutex_exit(&threadpools_lock);

        if (pool_percpu)
                threadpool_percpu_destroy(pool_percpu);
}

struct threadpool *
threadpool_percpu_ref(struct threadpool_percpu *pool_percpu)
{
        struct threadpool **poolp, *pool;

        poolp = percpu_getref(pool_percpu->tpp_percpu);
        pool = *poolp;
        percpu_putref(pool_percpu->tpp_percpu);

        return pool;
}

struct threadpool *
threadpool_percpu_ref_remote(struct threadpool_percpu *pool_percpu,
    struct cpu_info *ci)
{
        struct threadpool **poolp, *pool;

        percpu_traverse_enter();
        poolp = percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
        pool = *poolp;
        percpu_traverse_exit();

        return pool;
}

static int
threadpool_percpu_create(struct threadpool_percpu **pool_percpup, pri_t pri)
{
        struct threadpool_percpu *pool_percpu;
        struct cpu_info *ci;
        CPU_INFO_ITERATOR cii;
        unsigned int i, j;
        int error;

        pool_percpu = kmem_zalloc(sizeof(*pool_percpu), KM_SLEEP);
        if (pool_percpu == NULL) {
                error = ENOMEM;
                goto fail0;
        }
        pool_percpu->tpp_pri = pri;

        pool_percpu->tpp_percpu = percpu_alloc(sizeof(struct threadpool *));
        if (pool_percpu->tpp_percpu == NULL) {
                error = ENOMEM;
                goto fail1;
        }

        for (i = 0, CPU_INFO_FOREACH(cii, ci), i++) {
                struct threadpool *pool;

                pool = kmem_zalloc(sizeof(*pool), KM_SLEEP);
                error = threadpool_create(pool, ci, pri);
                if (error) {
                        kmem_free(pool, sizeof(*pool));
                        goto fail2;
                }
                percpu_traverse_enter();
                struct threadpool **const poolp =
                    percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
                *poolp = pool;
                percpu_traverse_exit();
        }

        /* Success! */
        *pool_percpup = (struct threadpool_percpu *)pool_percpu;
        return 0;

fail2:  for (j = 0, CPU_INFO_FOREACH(cii, ci), j++) {
                if (i <= j)
                        break;
                percpu_traverse_enter();
                struct threadpool **const poolp =
                    percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
                struct threadpool *const pool = *poolp;
                percpu_traverse_exit();
                threadpool_destroy(pool);
                kmem_free(pool, sizeof(*pool));
        }
        percpu_free(pool_percpu->tpp_percpu, sizeof(struct threadpool *));
fail1:  kmem_free(pool_percpu, sizeof(*pool_percpu));
fail0:  return error;
}

static void
threadpool_percpu_destroy(struct threadpool_percpu *pool_percpu)
{
        struct cpu_info *ci;
        CPU_INFO_ITERATOR cii;

        for (CPU_INFO_FOREACH(cii, ci)) {
                percpu_traverse_enter();
                struct threadpool **const poolp =
                    percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
                struct threadpool *const pool = *poolp;
                percpu_traverse_exit();
                threadpool_destroy(pool);
                kmem_free(pool, sizeof(*pool));
        }

        percpu_free(pool_percpu->tpp_percpu, sizeof(struct threadpool *));
        kmem_free(pool_percpu, sizeof(*pool_percpu));
}

/* Thread pool jobs */

void __printflike(4,5)
threadpool_job_init(struct threadpool_job *job, threadpool_job_fn_t fn,
    kmutex_t *lock, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        (void)vsnprintf(job->job_name, sizeof(job->job_name), fmt, ap);
        va_end(ap);

        job->job_lock = lock;
        job->job_thread = NULL;
        job->job_refcnt = 0;
        cv_init(&job->job_cv, job->job_name);
        job->job_fn = fn;
}

static void
threadpool_job_dead(struct threadpool_job *job)
{

        panic("threadpool job %p ran after destruction", job);
}

void
threadpool_job_destroy(struct threadpool_job *job)
{

        ASSERT_SLEEPABLE();

        KASSERTMSG((job->job_thread == NULL), "job %p still running", job);

        mutex_enter(job->job_lock);
        while (0 < job->job_refcnt)
                cv_wait(&job->job_cv, job->job_lock);
        mutex_exit(job->job_lock);

        job->job_lock = NULL;
        KASSERT(job->job_thread == NULL);
        KASSERT(job->job_refcnt == 0);
        KASSERT(!cv_has_waiters(&job->job_cv));
        cv_destroy(&job->job_cv);
        job->job_fn = threadpool_job_dead;
        (void)strlcpy(job->job_name, "deadjob", sizeof(job->job_name));
}

static int
threadpool_job_hold(struct threadpool_job *job)
{
        unsigned int refcnt;

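        /* Atomically bump the reference count, unless it would overflow. */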
        do {
                refcnt = job->job_refcnt;
                if (refcnt == UINT_MAX)
                        return EBUSY;
        } while (atomic_cas_uint(&job->job_refcnt, refcnt, (refcnt + 1))
            != refcnt);

        return 0;
}

static void
threadpool_job_rele(struct threadpool_job *job)
{
        unsigned int refcnt;

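        /*
         * Atomically drop a reference.  The final reference is dropped
         * under the job lock so that the wakeup cannot be missed by
         * threadpool_job_destroy() waiting on job_cv.
         */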
        do {
                refcnt = job->job_refcnt;
                KASSERT(0 < refcnt);
                if (refcnt == 1) {
                        mutex_enter(job->job_lock);
                        refcnt = atomic_dec_uint_nv(&job->job_refcnt);
                        KASSERT(refcnt != UINT_MAX);
                        if (refcnt == 0)
                                cv_broadcast(&job->job_cv);
                        mutex_exit(job->job_lock);
                        return;
                }
        } while (atomic_cas_uint(&job->job_refcnt, refcnt, (refcnt - 1))
            != refcnt);
}

void
threadpool_job_done(struct threadpool_job *job)
{

        KASSERT(mutex_owned(job->job_lock));
        KASSERT(job->job_thread != NULL);
        KASSERT(job->job_thread->tpt_lwp == curlwp);

        /*
         * We can safely read this field; it's only modified right before
         * we call the job work function, and we are only preserving it
         * to use here; no one cares if it contains junk afterward.
         */
        lwp_lock(curlwp);
        curlwp->l_name = job->job_thread->tpt_lwp_savedname;
        lwp_unlock(curlwp);

        cv_broadcast(&job->job_cv);
        job->job_thread = NULL;
}

void
threadpool_schedule_job(struct threadpool *pool, struct threadpool_job *job)
{

        KASSERT(mutex_owned(job->job_lock));

        /*
         * If the job's already running, let it keep running.  The job
         * is guaranteed by the interlock not to end early -- if it had
         * ended early, threadpool_job_done would have set job_thread
         * to NULL under the interlock.
         */
        if (__predict_true(job->job_thread != NULL)) {
                TP_LOG(("%s: job '%s' already running.\n",
                    __func__, job->job_name));
                return;
        }

        /* Otherwise, try to assign a thread to the job. */
        mutex_spin_enter(&pool->tp_lock);
        if (__predict_false(TAILQ_EMPTY(&pool->tp_idle_threads))) {
                /* Nobody's idle.  Give it to the overseer. */
                TP_LOG(("%s: giving job '%s' to overseer.\n",
                    __func__, job->job_name));
                job->job_thread = &pool->tp_overseer;
                TAILQ_INSERT_TAIL(&pool->tp_jobs, job, job_entry);
        } else {
                /* Assign it to the first idle thread. */
                job->job_thread = TAILQ_FIRST(&pool->tp_idle_threads);
                TP_LOG(("%s: giving job '%s' to idle thread %p.\n",
                    __func__, job->job_name, job->job_thread));
                TAILQ_REMOVE(&pool->tp_idle_threads, job->job_thread,
                    tpt_entry);
                threadpool_job_hold(job);
                job->job_thread->tpt_job = job;
        }

        /* Notify whomever we gave it to, overseer or idle thread. */
        KASSERT(job->job_thread != NULL);
        cv_broadcast(&job->job_thread->tpt_cv);
        mutex_spin_exit(&pool->tp_lock);
}

bool
threadpool_cancel_job_async(struct threadpool *pool, struct threadpool_job *job)
{

        KASSERT(mutex_owned(job->job_lock));

        /*
         * XXXJRT This fails (albeit safely) when all of the following
         * are true:
         *
         *      => "pool" is something other than what the job was
         *         scheduled on.  This can legitimately occur if,
         *         for example, a job is percpu-scheduled on CPU0
         *         and then CPU1 attempts to cancel it without taking
         *         a remote pool reference.  (this might happen by
         *         "luck of the draw").
         *
         *      => "job" is not yet running, but is assigned to the
         *         overseer.
         *
         * When this happens, this code makes the determination that
         * the job is already running.  The failure mode is that the
         * caller is told the job is running, and thus has to wait.
         * The overseer will eventually get to it and the job will
         * proceed as if it had been already running.
         */

        if (job->job_thread == NULL) {
                /* Nothing to do.  Guaranteed not running. */
                return true;
        } else if (job->job_thread == &pool->tp_overseer) {
                /* Take it off the list to guarantee it won't run. */
                job->job_thread = NULL;
                mutex_spin_enter(&pool->tp_lock);
                TAILQ_REMOVE(&pool->tp_jobs, job, job_entry);
                mutex_spin_exit(&pool->tp_lock);
                return true;
        } else {
                /* Too late -- already running. */
                return false;
        }
}

void
threadpool_cancel_job(struct threadpool *pool, struct threadpool_job *job)
{

        ASSERT_SLEEPABLE();

        KASSERT(mutex_owned(job->job_lock));

        if (threadpool_cancel_job_async(pool, job))
                return;

        /* Already running.  Wait for it to complete. */
        while (job->job_thread != NULL)
                cv_wait(&job->job_cv, job->job_lock);
}

/* Thread pool overseer thread */

static void __dead
threadpool_overseer_thread(void *arg)
{
        struct threadpool_thread *const overseer = arg;
        struct threadpool *const pool = overseer->tpt_pool;
        struct lwp *lwp = NULL;
        int ktflags;
        int error;

        KASSERT((pool->tp_cpu == NULL) || (pool->tp_cpu == curcpu()));

        /* Wait until we're initialized. */
        mutex_spin_enter(&pool->tp_lock);
        while (overseer->tpt_lwp == NULL)
                cv_wait(&overseer->tpt_cv, &pool->tp_lock);

        TP_LOG(("%s: starting.\n", __func__));

        for (;;) {
                /* Wait until there's a job. */
                while (TAILQ_EMPTY(&pool->tp_jobs)) {
                        if (ISSET(pool->tp_flags, THREADPOOL_DYING)) {
                                TP_LOG(("%s: THREADPOOL_DYING\n",
                                    __func__));
                                break;
                        }
                        cv_wait(&overseer->tpt_cv, &pool->tp_lock);
                }
                if (__predict_false(TAILQ_EMPTY(&pool->tp_jobs)))
                        break;

                /* If there are no threads, we'll have to try to start one. */
                if (TAILQ_EMPTY(&pool->tp_idle_threads)) {
                        TP_LOG(("%s: Got a job, need to create a thread.\n",
                            __func__));
                        threadpool_hold(pool);
                        mutex_spin_exit(&pool->tp_lock);

                        struct threadpool_thread *const thread =
                            pool_cache_get(threadpool_thread_pc, PR_WAITOK);
                        thread->tpt_lwp = NULL;
                        thread->tpt_pool = pool;
                        thread->tpt_job = NULL;
                        cv_init(&thread->tpt_cv, "poolthrd");

                        ktflags = 0;
                        ktflags |= KTHREAD_MPSAFE;
                        if (pool->tp_pri < PRI_KERNEL)
                                ktflags |= KTHREAD_TS;
                        error = kthread_create(pool->tp_pri, ktflags,
                            pool->tp_cpu, &threadpool_thread, thread, &lwp,
                            "poolthread/%d@%d",
                            (pool->tp_cpu ? cpu_index(pool->tp_cpu) : -1),
                            (int)pool->tp_pri);

                        mutex_spin_enter(&pool->tp_lock);
                        if (error) {
                                pool_cache_put(threadpool_thread_pc, thread);
                                threadpool_rele(pool);
                                /* XXX What to do to wait for memory? */
                                (void)kpause("thrdplcr", false, hz,
                                    &pool->tp_lock);
                                continue;
                        }
                        /*
                         * New kthread now owns the reference to the pool
                         * taken above.
                         */
                        KASSERT(lwp != NULL);
                        TAILQ_INSERT_TAIL(&pool->tp_idle_threads, thread,
                            tpt_entry);
                        thread->tpt_lwp = lwp;
                        lwp = NULL;
                        cv_broadcast(&thread->tpt_cv);
                        continue;
                }

                /* There are idle threads, so try giving one a job. */
                bool rele_job = true;
                struct threadpool_job *const job = TAILQ_FIRST(&pool->tp_jobs);
                TAILQ_REMOVE(&pool->tp_jobs, job, job_entry);
                error = threadpool_job_hold(job);
                if (error) {
                        TAILQ_INSERT_HEAD(&pool->tp_jobs, job, job_entry);
                        (void)kpause("pooljob", false, hz, &pool->tp_lock);
                        continue;
                }
                mutex_spin_exit(&pool->tp_lock);

                mutex_enter(job->job_lock);
                /* If the job was cancelled, we'll no longer be its thread. */
                if (__predict_true(job->job_thread == overseer)) {
                        mutex_spin_enter(&pool->tp_lock);
                        if (__predict_false(
                                TAILQ_EMPTY(&pool->tp_idle_threads))) {
                                /*
                                 * Someone else snagged the thread
                                 * first.  We'll have to try again.
                                 */
                                TP_LOG(("%s: '%s' lost race to use idle thread.\n",
                                    __func__, job->job_name));
                                TAILQ_INSERT_HEAD(&pool->tp_jobs, job,
                                    job_entry);
                        } else {
                                /*
                                 * Assign the job to the thread and
                                 * wake the thread so it starts work.
                                 */
                                struct threadpool_thread *const thread =
                                    TAILQ_FIRST(&pool->tp_idle_threads);

                                TP_LOG(("%s: '%s' gets thread %p\n",
                                    __func__, job->job_name, thread));
                                KASSERT(thread->tpt_job == NULL);
                                TAILQ_REMOVE(&pool->tp_idle_threads, thread,
                                    tpt_entry);
                                thread->tpt_job = job;
                                job->job_thread = thread;
                                cv_broadcast(&thread->tpt_cv);
                                /* Gave the thread our job reference. */
                                rele_job = false;
                        }
                        mutex_spin_exit(&pool->tp_lock);
                }
                mutex_exit(job->job_lock);
                if (__predict_false(rele_job))
                        threadpool_job_rele(job);

                mutex_spin_enter(&pool->tp_lock);
        }
        threadpool_rele(pool);
        mutex_spin_exit(&pool->tp_lock);

        TP_LOG(("%s: exiting.\n", __func__));

        kthread_exit(0);
}

/* Thread pool thread */

static void __dead
threadpool_thread(void *arg)
{
        struct threadpool_thread *const thread = arg;
        struct threadpool *const pool = thread->tpt_pool;

        KASSERT((pool->tp_cpu == NULL) || (pool->tp_cpu == curcpu()));

        /* Wait until we're initialized and on the queue. */
        mutex_spin_enter(&pool->tp_lock);
        while (thread->tpt_lwp == NULL)
                cv_wait(&thread->tpt_cv, &pool->tp_lock);

        TP_LOG(("%s: starting.\n", __func__));

        KASSERT(thread->tpt_lwp == curlwp);
        for (;;) {
                /* Wait until we are assigned a job. */
                while (thread->tpt_job == NULL) {
                        if (ISSET(pool->tp_flags, THREADPOOL_DYING)) {
                                TP_LOG(("%s: THREADPOOL_DYING\n",
                                    __func__));
                                break;
                        }
                        if (cv_timedwait(&thread->tpt_cv, &pool->tp_lock,
                            THREADPOOL_IDLE_TICKS))
                                break;
                }
                if (__predict_false(thread->tpt_job == NULL)) {
                        TAILQ_REMOVE(&pool->tp_idle_threads, thread,
                            tpt_entry);
                        break;
                }

                struct threadpool_job *const job = thread->tpt_job;
                KASSERT(job != NULL);

                /* Set our lwp name to reflect what job we're doing. */
                lwp_lock(curlwp);
                char *const lwp_name __diagused = curlwp->l_name;
                thread->tpt_lwp_savedname = curlwp->l_name;
                curlwp->l_name = job->job_name;
                lwp_unlock(curlwp);

                mutex_spin_exit(&pool->tp_lock);

                TP_LOG(("%s: running job '%s' on thread %p.\n",
                    __func__, job->job_name, thread));

                /* Run the job. */
                (*job->job_fn)(job);

                /* lwp name restored in threadpool_job_done(). */
                KASSERTMSG((curlwp->l_name == lwp_name),
                    "someone forgot to call threadpool_job_done()!");

                /* Job is done and its name is unreferenced.  Release it. */
                threadpool_job_rele(job);

                mutex_spin_enter(&pool->tp_lock);
                KASSERT(thread->tpt_job == job);
                thread->tpt_job = NULL;
                TAILQ_INSERT_TAIL(&pool->tp_idle_threads, thread, tpt_entry);
        }
        threadpool_rele(pool);
        mutex_spin_exit(&pool->tp_lock);

        TP_LOG(("%s: thread %p exiting.\n", __func__, thread));

        KASSERT(!cv_has_waiters(&thread->tpt_cv));
        cv_destroy(&thread->tpt_cv);
        pool_cache_put(threadpool_thread_pc, thread);
        kthread_exit(0);
}