/*	$NetBSD: kern_threadpool.c,v 1.11 2018/12/26 22:16:26 thorpej Exp $	*/

/*-
 * Copyright (c) 2014, 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell and Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Thread pools.
 *
 * A thread pool is a collection of worker threads idle or running
 * jobs, together with an overseer thread that does not run jobs but
 * can be given jobs to assign to a worker thread.  Scheduling a job in
 * a thread pool does not allocate or even sleep at all, except perhaps
 * on an adaptive lock, unlike kthread_create.  Jobs reuse threads, so
 * they do not incur the expense of creating and destroying kthreads
 * unless there is not much work to be done.
 *
 * A per-CPU thread pool (threadpool_percpu) is a collection of thread
 * pools, one per CPU bound to that CPU.  For each priority level in
 * use, there is one shared unbound thread pool (i.e., pool of threads
 * not bound to any CPU) and one shared per-CPU thread pool.
 *
 * To use the unbound thread pool at priority pri, call
 * threadpool_get(&pool, pri).  When you're done, call
 * threadpool_put(pool, pri).
 *
 * To use the per-CPU thread pools at priority pri, call
 * threadpool_percpu_get(&pool_percpu, pri), and then use the thread
 * pool returned by threadpool_percpu_ref(pool_percpu) for the current
 * CPU, or by threadpool_percpu_ref_remote(pool_percpu, ci) for another
 * CPU.  When you're done, call threadpool_percpu_put(pool_percpu,
 * pri).
 *
 *	+--MACHINE-----------------------------------------------+
 *	| +--CPU 0-------+ +--CPU 1-------+     +--CPU n-------+ |
 *	| | <overseer 0> | | <overseer 1> | ... | <overseer n> | |
 *	| | <idle 0a>    | | <running 1a> | ... | <idle na>    | |
 *	| | <running 0b> | | <running 1b> | ... | <idle nb>    | |
 *	| | .            | | .            | ... | .            | |
 *	| | .            | | .            | ... | .            | |
 *	| | .            | | .            | ... | .            | |
 *	| +--------------+ +--------------+     +--------------+ |
 *	|            +--unbound---------+                        |
 *	|            | <overseer n+1>   |                        |
 *	|            | <idle (n+1)a>    |                        |
 *	|            | <running (n+1)b> |                        |
 *	|            +------------------+                        |
 *	+--------------------------------------------------------+
 *
 * XXX Why one overseer per CPU?  I did that originally to avoid
 * touching remote CPUs' memory when scheduling a job, but that still
 * requires interprocessor synchronization.  Perhaps we could get by
 * with a single overseer thread, at the expense of another pointer in
 * struct threadpool_job to identify the CPU on which it must run
 * in order for the overseer to schedule it correctly.
 */
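
/*
 * Illustrative sketch only (not compiled): one way a caller might use
 * the unbound pool API described above.  The names my_softc, my_lock,
 * my_job_fn, my_attach, my_kick, and my_detach are hypothetical; only
 * the threadpool_* calls and PRI_NONE come from this file and
 * <sys/threadpool.h>.  Error handling is abbreviated.
 */
#if 0
struct my_softc {
	kmutex_t		my_lock;
	struct threadpool	*my_pool;
	struct threadpool_job	my_job;
};

static void
my_job_fn(struct threadpool_job *job)
{
	struct my_softc *sc = container_of(job, struct my_softc, my_job);

	/* ... do the actual work here ... */

	/* Tell the pool the job has finished; the job lock must be held. */
	mutex_enter(&sc->my_lock);
	threadpool_job_done(job);
	mutex_exit(&sc->my_lock);
}

static int
my_attach(struct my_softc *sc)
{
	int error;

	mutex_init(&sc->my_lock, MUTEX_DEFAULT, IPL_NONE);
	error = threadpool_get(&sc->my_pool, PRI_NONE);
	if (error)
		return error;
	threadpool_job_init(&sc->my_job, my_job_fn, &sc->my_lock, "myjob");
	return 0;
}

static void
my_kick(struct my_softc *sc)
{

	/* Cheap: never allocates, sleeps at most on an adaptive lock. */
	mutex_enter(&sc->my_lock);
	threadpool_schedule_job(sc->my_pool, &sc->my_job);
	mutex_exit(&sc->my_lock);
}

static void
my_detach(struct my_softc *sc)
{

	mutex_enter(&sc->my_lock);
	threadpool_cancel_job(sc->my_pool, &sc->my_job);
	mutex_exit(&sc->my_lock);
	threadpool_job_destroy(&sc->my_job);
	threadpool_put(sc->my_pool, PRI_NONE);
	mutex_destroy(&sc->my_lock);
}
#endif	/* example */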

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_threadpool.c,v 1.11 2018/12/26 22:16:26 thorpej Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/percpu.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/threadpool.h>

/* Data structures */

TAILQ_HEAD(job_head, threadpool_job);
TAILQ_HEAD(thread_head, threadpool_thread);

struct threadpool_thread {
	struct lwp *tpt_lwp;
	struct threadpool *tpt_pool;
	struct threadpool_job *tpt_job;
	kcondvar_t tpt_cv;
	TAILQ_ENTRY(threadpool_thread) tpt_entry;
};

struct threadpool {
	kmutex_t tp_lock;
	struct threadpool_thread tp_overseer;
	struct job_head tp_jobs;
	struct thread_head tp_idle_threads;
	uint64_t tp_refcnt;
	int tp_flags;
#define	THREADPOOL_DYING	0x01
	struct cpu_info *tp_cpu;
	pri_t tp_pri;
};

static void	threadpool_hold(struct threadpool *);
static void	threadpool_rele(struct threadpool *);

static int	threadpool_percpu_create(struct threadpool_percpu **, pri_t);
static void	threadpool_percpu_destroy(struct threadpool_percpu *);

static threadpool_job_fn_t threadpool_job_dead;

static int	threadpool_job_hold(struct threadpool_job *);
static void	threadpool_job_rele(struct threadpool_job *);

static void	threadpool_overseer_thread(void *) __dead;
static void	threadpool_thread(void *) __dead;

static pool_cache_t	threadpool_thread_pc	__read_mostly;

static kmutex_t		threadpools_lock	__cacheline_aligned;

/* Idle out threads after 30 seconds */
#define	THREADPOOL_IDLE_TICKS	mstohz(30 * 1000)

struct threadpool_unbound {
	struct threadpool tpu_pool;

	/* protected by threadpools_lock */
	LIST_ENTRY(threadpool_unbound) tpu_link;
	uint64_t tpu_refcnt;
};

static LIST_HEAD(, threadpool_unbound) unbound_threadpools;

static struct threadpool_unbound *
threadpool_lookup_unbound(pri_t pri)
{
	struct threadpool_unbound *tpu;

	LIST_FOREACH(tpu, &unbound_threadpools, tpu_link) {
		if (tpu->tpu_pool.tp_pri == pri)
			return tpu;
	}
	return NULL;
}

static void
threadpool_insert_unbound(struct threadpool_unbound *tpu)
{
	KASSERT(threadpool_lookup_unbound(tpu->tpu_pool.tp_pri) == NULL);
	LIST_INSERT_HEAD(&unbound_threadpools, tpu, tpu_link);
}

static void
threadpool_remove_unbound(struct threadpool_unbound *tpu)
{
	KASSERT(threadpool_lookup_unbound(tpu->tpu_pool.tp_pri) == tpu);
	LIST_REMOVE(tpu, tpu_link);
}

struct threadpool_percpu {
	percpu_t *tpp_percpu;
	pri_t tpp_pri;

	/* protected by threadpools_lock */
	LIST_ENTRY(threadpool_percpu) tpp_link;
	uint64_t tpp_refcnt;
};

static LIST_HEAD(, threadpool_percpu) percpu_threadpools;

static struct threadpool_percpu *
threadpool_lookup_percpu(pri_t pri)
{
	struct threadpool_percpu *tpp;

	LIST_FOREACH(tpp, &percpu_threadpools, tpp_link) {
		if (tpp->tpp_pri == pri)
			return tpp;
	}
	return NULL;
}

static void
threadpool_insert_percpu(struct threadpool_percpu *tpp)
{
	KASSERT(threadpool_lookup_percpu(tpp->tpp_pri) == NULL);
	LIST_INSERT_HEAD(&percpu_threadpools, tpp, tpp_link);
}

static void
threadpool_remove_percpu(struct threadpool_percpu *tpp)
{
	KASSERT(threadpool_lookup_percpu(tpp->tpp_pri) == tpp);
	LIST_REMOVE(tpp, tpp_link);
}

#ifdef THREADPOOL_VERBOSE
#define	TP_LOG(x)	printf x
#else
#define	TP_LOG(x)	/* nothing */
#endif /* THREADPOOL_VERBOSE */

void
threadpools_init(void)
{

	threadpool_thread_pc =
	    pool_cache_init(sizeof(struct threadpool_thread), 0, 0, 0,
		"thplthrd", NULL, IPL_NONE, NULL, NULL, NULL);

	LIST_INIT(&unbound_threadpools);
	LIST_INIT(&percpu_threadpools);
	mutex_init(&threadpools_lock, MUTEX_DEFAULT, IPL_NONE);
}

/* Thread pool creation */

static bool
threadpool_pri_is_valid(pri_t pri)
{
	return (pri == PRI_NONE || (pri >= PRI_USER && pri < PRI_COUNT));
}

static int
threadpool_create(struct threadpool *const pool, struct cpu_info *ci,
    pri_t pri)
{
	struct lwp *lwp;
	int ktflags;
	int error;

	KASSERT(threadpool_pri_is_valid(pri));

	mutex_init(&pool->tp_lock, MUTEX_DEFAULT, IPL_VM);
	/* XXX overseer */
	TAILQ_INIT(&pool->tp_jobs);
	TAILQ_INIT(&pool->tp_idle_threads);
	pool->tp_refcnt = 1;		/* overseer's reference */
	pool->tp_flags = 0;
	pool->tp_cpu = ci;
	pool->tp_pri = pri;

	pool->tp_overseer.tpt_lwp = NULL;
	pool->tp_overseer.tpt_pool = pool;
	pool->tp_overseer.tpt_job = NULL;
	cv_init(&pool->tp_overseer.tpt_cv, "poolover");

	ktflags = 0;
	ktflags |= KTHREAD_MPSAFE;
	if (pri < PRI_KERNEL)
		ktflags |= KTHREAD_TS;
	error = kthread_create(pri, ktflags, ci, &threadpool_overseer_thread,
	    &pool->tp_overseer, &lwp,
	    "pooloverseer/%d@%d", (ci ? cpu_index(ci) : -1), (int)pri);
	if (error)
		goto fail0;

	mutex_spin_enter(&pool->tp_lock);
	pool->tp_overseer.tpt_lwp = lwp;
	cv_broadcast(&pool->tp_overseer.tpt_cv);
	mutex_spin_exit(&pool->tp_lock);

	return 0;

fail0:	KASSERT(error);
	KASSERT(pool->tp_overseer.tpt_job == NULL);
	KASSERT(pool->tp_overseer.tpt_pool == pool);
	KASSERT(pool->tp_flags == 0);
	KASSERT(pool->tp_refcnt == 0);
	KASSERT(TAILQ_EMPTY(&pool->tp_idle_threads));
	KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
	KASSERT(!cv_has_waiters(&pool->tp_overseer.tpt_cv));
	cv_destroy(&pool->tp_overseer.tpt_cv);
	mutex_destroy(&pool->tp_lock);
	return error;
}

/* Thread pool destruction */

static void
threadpool_destroy(struct threadpool *pool)
{
	struct threadpool_thread *thread;

	/* Mark the pool dying and wait for threads to commit suicide. */
	mutex_spin_enter(&pool->tp_lock);
	KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
	pool->tp_flags |= THREADPOOL_DYING;
	cv_broadcast(&pool->tp_overseer.tpt_cv);
	TAILQ_FOREACH(thread, &pool->tp_idle_threads, tpt_entry)
		cv_broadcast(&thread->tpt_cv);
	while (0 < pool->tp_refcnt) {
		TP_LOG(("%s: draining %u references...\n", __func__,
		    pool->tp_refcnt));
		cv_wait(&pool->tp_overseer.tpt_cv, &pool->tp_lock);
	}
	mutex_spin_exit(&pool->tp_lock);

	KASSERT(pool->tp_overseer.tpt_job == NULL);
	KASSERT(pool->tp_overseer.tpt_pool == pool);
	KASSERT(pool->tp_flags == THREADPOOL_DYING);
	KASSERT(pool->tp_refcnt == 0);
	KASSERT(TAILQ_EMPTY(&pool->tp_idle_threads));
	KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
	KASSERT(!cv_has_waiters(&pool->tp_overseer.tpt_cv));
	cv_destroy(&pool->tp_overseer.tpt_cv);
	mutex_destroy(&pool->tp_lock);
}

static void
threadpool_hold(struct threadpool *pool)
{

	KASSERT(mutex_owned(&pool->tp_lock));
	pool->tp_refcnt++;
	KASSERT(pool->tp_refcnt != 0);
}

static void
threadpool_rele(struct threadpool *pool)
{

	KASSERT(mutex_owned(&pool->tp_lock));
	KASSERT(0 < pool->tp_refcnt);
	if (--pool->tp_refcnt == 0)
		cv_broadcast(&pool->tp_overseer.tpt_cv);
}

/* Unbound thread pools */

int
threadpool_get(struct threadpool **poolp, pri_t pri)
{
	struct threadpool_unbound *tpu, *tmp = NULL;
	int error;

	ASSERT_SLEEPABLE();

	if (! threadpool_pri_is_valid(pri))
		return EINVAL;

	mutex_enter(&threadpools_lock);
	tpu = threadpool_lookup_unbound(pri);
	if (tpu == NULL) {
		mutex_exit(&threadpools_lock);
		TP_LOG(("%s: No pool for pri=%d, creating one.\n",
		    __func__, (int)pri));
		tmp = kmem_zalloc(sizeof(*tmp), KM_SLEEP);
		error = threadpool_create(&tmp->tpu_pool, NULL, pri);
		if (error) {
			kmem_free(tmp, sizeof(*tmp));
			return error;
		}
		mutex_enter(&threadpools_lock);
		tpu = threadpool_lookup_unbound(pri);
		if (tpu == NULL) {
			TP_LOG(("%s: Won the creation race for pri=%d.\n",
			    __func__, (int)pri));
			tpu = tmp;
			tmp = NULL;
			threadpool_insert_unbound(tpu);
		}
	}
	KASSERT(tpu != NULL);
	tpu->tpu_refcnt++;
	KASSERT(tpu->tpu_refcnt != 0);
	mutex_exit(&threadpools_lock);

	if (tmp != NULL) {
		threadpool_destroy(&tmp->tpu_pool);
		kmem_free(tmp, sizeof(*tmp));
	}
	KASSERT(tpu != NULL);
	*poolp = &tpu->tpu_pool;
	return 0;
}

void
threadpool_put(struct threadpool *pool, pri_t pri)
{
	struct threadpool_unbound *tpu =
	    container_of(pool, struct threadpool_unbound, tpu_pool);

	ASSERT_SLEEPABLE();

	KASSERT(threadpool_pri_is_valid(pri));

	mutex_enter(&threadpools_lock);
	KASSERT(tpu == threadpool_lookup_unbound(pri));
	KASSERT(0 < tpu->tpu_refcnt);
	if (--tpu->tpu_refcnt == 0) {
		TP_LOG(("%s: Last reference for pri=%d, destroying pool.\n",
		    __func__, (int)pri));
		threadpool_remove_unbound(tpu);
	} else {
		tpu = NULL;
	}
	mutex_exit(&threadpools_lock);

	if (tpu) {
		threadpool_destroy(&tpu->tpu_pool);
		kmem_free(tpu, sizeof(*tpu));
	}
}

/* Per-CPU thread pools */

int
threadpool_percpu_get(struct threadpool_percpu **pool_percpup, pri_t pri)
{
	struct threadpool_percpu *pool_percpu, *tmp = NULL;
	int error;

	ASSERT_SLEEPABLE();

	if (! threadpool_pri_is_valid(pri))
		return EINVAL;

	mutex_enter(&threadpools_lock);
	pool_percpu = threadpool_lookup_percpu(pri);
	if (pool_percpu == NULL) {
		mutex_exit(&threadpools_lock);
		TP_LOG(("%s: No pool for pri=%d, creating one.\n",
		    __func__, (int)pri));
		error = threadpool_percpu_create(&tmp, pri);
		if (error)
			return error;
		KASSERT(tmp != NULL);
		mutex_enter(&threadpools_lock);
		pool_percpu = threadpool_lookup_percpu(pri);
		if (pool_percpu == NULL) {
			TP_LOG(("%s: Won the creation race for pri=%d.\n",
			    __func__, (int)pri));
			pool_percpu = tmp;
			tmp = NULL;
			threadpool_insert_percpu(pool_percpu);
		}
	}
	KASSERT(pool_percpu != NULL);
	pool_percpu->tpp_refcnt++;
	KASSERT(pool_percpu->tpp_refcnt != 0);
	mutex_exit(&threadpools_lock);

	if (tmp != NULL)
		threadpool_percpu_destroy(tmp);
	KASSERT(pool_percpu != NULL);
	*pool_percpup = pool_percpu;
	return 0;
}

void
threadpool_percpu_put(struct threadpool_percpu *pool_percpu, pri_t pri)
{

	ASSERT_SLEEPABLE();

	KASSERT(threadpool_pri_is_valid(pri));

	mutex_enter(&threadpools_lock);
	KASSERT(pool_percpu == threadpool_lookup_percpu(pri));
	KASSERT(0 < pool_percpu->tpp_refcnt);
	if (--pool_percpu->tpp_refcnt == 0) {
		TP_LOG(("%s: Last reference for pri=%d, destroying pool.\n",
		    __func__, (int)pri));
		threadpool_remove_percpu(pool_percpu);
	} else {
		pool_percpu = NULL;
	}
	mutex_exit(&threadpools_lock);

	if (pool_percpu)
		threadpool_percpu_destroy(pool_percpu);
}

struct threadpool *
threadpool_percpu_ref(struct threadpool_percpu *pool_percpu)
{
	struct threadpool **poolp, *pool;

	poolp = percpu_getref(pool_percpu->tpp_percpu);
	pool = *poolp;
	percpu_putref(pool_percpu->tpp_percpu);

	return pool;
}

struct threadpool *
threadpool_percpu_ref_remote(struct threadpool_percpu *pool_percpu,
    struct cpu_info *ci)
{
	struct threadpool **poolp, *pool;

	percpu_traverse_enter();
	poolp = percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
	pool = *poolp;
	percpu_traverse_exit();

	return pool;
}
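
/*
 * Illustrative sketch only (not compiled): using a per-CPU pool as
 * described in the comment at the top of this file.  my_percpu_kick
 * and its arguments are hypothetical; kpreempt_disable/enable merely
 * keep the caller from migrating while it picks the current CPU's
 * pool, which a real caller may or may not care about.
 */
#if 0
static void
my_percpu_kick(struct threadpool_percpu *pool_percpu,
    struct threadpool_job *job)
{
	struct threadpool *pool;

	kpreempt_disable();
	pool = threadpool_percpu_ref(pool_percpu);	/* this CPU's pool */
	kpreempt_enable();

	mutex_enter(job->job_lock);
	threadpool_schedule_job(pool, job);
	mutex_exit(job->job_lock);
}
#endif	/* example */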

static int
threadpool_percpu_create(struct threadpool_percpu **pool_percpup, pri_t pri)
{
	struct threadpool_percpu *pool_percpu;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	unsigned int i, j;
	int error;

	pool_percpu = kmem_zalloc(sizeof(*pool_percpu), KM_SLEEP);
	if (pool_percpu == NULL) {
		error = ENOMEM;
		goto fail0;
	}
	pool_percpu->tpp_pri = pri;

	pool_percpu->tpp_percpu = percpu_alloc(sizeof(struct threadpool *));
	if (pool_percpu->tpp_percpu == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	for (i = 0, CPU_INFO_FOREACH(cii, ci), i++) {
		struct threadpool *pool;

		pool = kmem_zalloc(sizeof(*pool), KM_SLEEP);
		error = threadpool_create(pool, ci, pri);
		if (error) {
			kmem_free(pool, sizeof(*pool));
			goto fail2;
		}
		percpu_traverse_enter();
		struct threadpool **const poolp =
		    percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
		*poolp = pool;
		percpu_traverse_exit();
	}

	/* Success! */
	*pool_percpup = (struct threadpool_percpu *)pool_percpu;
	return 0;

fail2:	for (j = 0, CPU_INFO_FOREACH(cii, ci), j++) {
		if (i <= j)
			break;
		percpu_traverse_enter();
		struct threadpool **const poolp =
		    percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
		struct threadpool *const pool = *poolp;
		percpu_traverse_exit();
		threadpool_destroy(pool);
		kmem_free(pool, sizeof(*pool));
	}
	percpu_free(pool_percpu->tpp_percpu, sizeof(struct threadpool *));
fail1:	kmem_free(pool_percpu, sizeof(*pool_percpu));
fail0:	return error;
}

static void
threadpool_percpu_destroy(struct threadpool_percpu *pool_percpu)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	for (CPU_INFO_FOREACH(cii, ci)) {
		percpu_traverse_enter();
		struct threadpool **const poolp =
		    percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
		struct threadpool *const pool = *poolp;
		percpu_traverse_exit();
		threadpool_destroy(pool);
		kmem_free(pool, sizeof(*pool));
	}

	percpu_free(pool_percpu->tpp_percpu, sizeof(struct threadpool *));
	kmem_free(pool_percpu, sizeof(*pool_percpu));
}

/* Thread pool jobs */

void __printflike(4,5)
threadpool_job_init(struct threadpool_job *job, threadpool_job_fn_t fn,
    kmutex_t *lock, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	(void)vsnprintf(job->job_name, sizeof(job->job_name), fmt, ap);
	va_end(ap);

	job->job_lock = lock;
	job->job_thread = NULL;
	job->job_refcnt = 0;
	cv_init(&job->job_cv, job->job_name);
	job->job_fn = fn;
}

static void
threadpool_job_dead(struct threadpool_job *job)
{

	panic("threadpool job %p ran after destruction", job);
}

void
threadpool_job_destroy(struct threadpool_job *job)
{

	ASSERT_SLEEPABLE();

	KASSERTMSG((job->job_thread == NULL), "job %p still running", job);

	mutex_enter(job->job_lock);
	while (0 < job->job_refcnt)
		cv_wait(&job->job_cv, job->job_lock);
	mutex_exit(job->job_lock);

	job->job_lock = NULL;
	KASSERT(job->job_thread == NULL);
	KASSERT(job->job_refcnt == 0);
	KASSERT(!cv_has_waiters(&job->job_cv));
	cv_destroy(&job->job_cv);
	job->job_fn = threadpool_job_dead;
	(void)strlcpy(job->job_name, "deadjob", sizeof(job->job_name));
}

static int
threadpool_job_hold(struct threadpool_job *job)
{
	unsigned int refcnt;

	do {
		refcnt = job->job_refcnt;
		if (refcnt == UINT_MAX)
			return EBUSY;
	} while (atomic_cas_uint(&job->job_refcnt, refcnt, (refcnt + 1))
	    != refcnt);

	return 0;
}

static void
threadpool_job_rele(struct threadpool_job *job)
{
	unsigned int refcnt;

	do {
		refcnt = job->job_refcnt;
		KASSERT(0 < refcnt);
		if (refcnt == 1) {
			mutex_enter(job->job_lock);
			refcnt = atomic_dec_uint_nv(&job->job_refcnt);
			KASSERT(refcnt != UINT_MAX);
			if (refcnt == 0)
				cv_broadcast(&job->job_cv);
			mutex_exit(job->job_lock);
			return;
		}
	} while (atomic_cas_uint(&job->job_refcnt, refcnt, (refcnt - 1))
	    != refcnt);
}

void
threadpool_job_done(struct threadpool_job *job)
{

	KASSERT(mutex_owned(job->job_lock));
	KASSERT(job->job_thread != NULL);
	KASSERT(job->job_thread->tpt_lwp == curlwp);

	cv_broadcast(&job->job_cv);
	job->job_thread = NULL;
}

void
threadpool_schedule_job(struct threadpool *pool, struct threadpool_job *job)
{

	KASSERT(mutex_owned(job->job_lock));

	/*
	 * If the job's already running, let it keep running.  The job
	 * is guaranteed by the interlock not to end early -- if it had
	 * ended early, threadpool_job_done would have set job_thread
	 * to NULL under the interlock.
	 */
	if (__predict_true(job->job_thread != NULL)) {
		TP_LOG(("%s: job '%s' already running.\n",
		    __func__, job->job_name));
		return;
	}

	/* Otherwise, try to assign a thread to the job. */
	mutex_spin_enter(&pool->tp_lock);
	if (__predict_false(TAILQ_EMPTY(&pool->tp_idle_threads))) {
		/* Nobody's idle.  Give it to the overseer. */
		TP_LOG(("%s: giving job '%s' to overseer.\n",
		    __func__, job->job_name));
		job->job_thread = &pool->tp_overseer;
		TAILQ_INSERT_TAIL(&pool->tp_jobs, job, job_entry);
	} else {
		/* Assign it to the first idle thread. */
		job->job_thread = TAILQ_FIRST(&pool->tp_idle_threads);
		TP_LOG(("%s: giving job '%s' to idle thread %p.\n",
		    __func__, job->job_name, job->job_thread));
		TAILQ_REMOVE(&pool->tp_idle_threads, job->job_thread,
		    tpt_entry);
		threadpool_job_hold(job);
		job->job_thread->tpt_job = job;
	}

	/* Notify whomever we gave it to, overseer or idle thread. */
	KASSERT(job->job_thread != NULL);
	cv_broadcast(&job->job_thread->tpt_cv);
	mutex_spin_exit(&pool->tp_lock);
}

bool
threadpool_cancel_job_async(struct threadpool *pool, struct threadpool_job *job)
{

	KASSERT(mutex_owned(job->job_lock));

	/*
	 * XXXJRT This fails (albeit safely) when all of the following
	 * are true:
	 *
	 *	=> "pool" is something other than what the job was
	 *	   scheduled on.  This can legitimately occur if,
	 *	   for example, a job is percpu-scheduled on CPU0
	 *	   and then CPU1 attempts to cancel it without taking
	 *	   a remote pool reference.  (this might happen by
	 *	   "luck of the draw").
	 *
	 *	=> "job" is not yet running, but is assigned to the
	 *	   overseer.
	 *
	 * When this happens, this code makes the determination that
	 * the job is already running.  The failure mode is that the
	 * caller is told the job is running, and thus has to wait.
	 * The overseer will eventually get to it and the job will
	 * proceed as if it had been already running.
	 */

	if (job->job_thread == NULL) {
		/* Nothing to do.  Guaranteed not running. */
		return true;
	} else if (job->job_thread == &pool->tp_overseer) {
		/* Take it off the list to guarantee it won't run. */
		job->job_thread = NULL;
		mutex_spin_enter(&pool->tp_lock);
		TAILQ_REMOVE(&pool->tp_jobs, job, job_entry);
		mutex_spin_exit(&pool->tp_lock);
		return true;
	} else {
		/* Too late -- already running. */
		return false;
	}
}

void
threadpool_cancel_job(struct threadpool *pool, struct threadpool_job *job)
{

	ASSERT_SLEEPABLE();

	KASSERT(mutex_owned(job->job_lock));

	if (threadpool_cancel_job_async(pool, job))
		return;

	/* Already running.  Wait for it to complete. */
	while (job->job_thread != NULL)
		cv_wait(&job->job_cv, job->job_lock);
}

/* Thread pool overseer thread */

static void __dead
threadpool_overseer_thread(void *arg)
{
	struct threadpool_thread *const overseer = arg;
	struct threadpool *const pool = overseer->tpt_pool;
	struct lwp *lwp = NULL;
	int ktflags;
	int error;

	KASSERT((pool->tp_cpu == NULL) || (pool->tp_cpu == curcpu()));

	/* Wait until we're initialized. */
	mutex_spin_enter(&pool->tp_lock);
	while (overseer->tpt_lwp == NULL)
		cv_wait(&overseer->tpt_cv, &pool->tp_lock);

	TP_LOG(("%s: starting.\n", __func__));

	for (;;) {
		/* Wait until there's a job. */
		while (TAILQ_EMPTY(&pool->tp_jobs)) {
			if (ISSET(pool->tp_flags, THREADPOOL_DYING)) {
				TP_LOG(("%s: THREADPOOL_DYING\n",
				    __func__));
				break;
			}
			cv_wait(&overseer->tpt_cv, &pool->tp_lock);
		}
		if (__predict_false(TAILQ_EMPTY(&pool->tp_jobs)))
			break;

		/* If there are no threads, we'll have to try to start one. */
		if (TAILQ_EMPTY(&pool->tp_idle_threads)) {
			TP_LOG(("%s: Got a job, need to create a thread.\n",
			    __func__));
			threadpool_hold(pool);
			mutex_spin_exit(&pool->tp_lock);

			struct threadpool_thread *const thread =
			    pool_cache_get(threadpool_thread_pc, PR_WAITOK);
			thread->tpt_lwp = NULL;
			thread->tpt_pool = pool;
			thread->tpt_job = NULL;
			cv_init(&thread->tpt_cv, "poolthrd");

			ktflags = 0;
			ktflags |= KTHREAD_MPSAFE;
			if (pool->tp_pri < PRI_KERNEL)
				ktflags |= KTHREAD_TS;
			error = kthread_create(pool->tp_pri, ktflags,
			    pool->tp_cpu, &threadpool_thread, thread, &lwp,
			    "poolthread/%d@%d",
			    (pool->tp_cpu ? cpu_index(pool->tp_cpu) : -1),
			    (int)pool->tp_pri);

			mutex_spin_enter(&pool->tp_lock);
			if (error) {
				pool_cache_put(threadpool_thread_pc, thread);
				threadpool_rele(pool);
				/* XXX What to do to wait for memory? */
				(void)kpause("thrdplcr", false, hz,
				    &pool->tp_lock);
				continue;
			}
			/*
			 * New kthread now owns the reference to the pool
			 * taken above.
			 */
			KASSERT(lwp != NULL);
			TAILQ_INSERT_TAIL(&pool->tp_idle_threads, thread,
			    tpt_entry);
			thread->tpt_lwp = lwp;
			lwp = NULL;
			cv_broadcast(&thread->tpt_cv);
			continue;
		}

		/* There are idle threads, so try giving one a job. */
		bool rele_job = true;
		struct threadpool_job *const job = TAILQ_FIRST(&pool->tp_jobs);
		TAILQ_REMOVE(&pool->tp_jobs, job, job_entry);
		error = threadpool_job_hold(job);
		if (error) {
			TAILQ_INSERT_HEAD(&pool->tp_jobs, job, job_entry);
			(void)kpause("pooljob", false, hz, &pool->tp_lock);
			continue;
		}
		mutex_spin_exit(&pool->tp_lock);

		mutex_enter(job->job_lock);
		/* If the job was cancelled, we'll no longer be its thread. */
		if (__predict_true(job->job_thread == overseer)) {
			mutex_spin_enter(&pool->tp_lock);
			if (__predict_false(
				    TAILQ_EMPTY(&pool->tp_idle_threads))) {
				/*
				 * Someone else snagged the thread
				 * first.  We'll have to try again.
				 */
				TP_LOG(("%s: '%s' lost race to use idle thread.\n",
				    __func__, job->job_name));
				TAILQ_INSERT_HEAD(&pool->tp_jobs, job,
				    job_entry);
			} else {
				/*
				 * Assign the job to the thread and
				 * wake the thread so it starts work.
				 */
				struct threadpool_thread *const thread =
				    TAILQ_FIRST(&pool->tp_idle_threads);

				TP_LOG(("%s: '%s' gets thread %p\n",
				    __func__, job->job_name, thread));
				KASSERT(thread->tpt_job == NULL);
				TAILQ_REMOVE(&pool->tp_idle_threads, thread,
				    tpt_entry);
				thread->tpt_job = job;
				job->job_thread = thread;
				cv_broadcast(&thread->tpt_cv);
				/* Gave the thread our job reference. */
				rele_job = false;
			}
			mutex_spin_exit(&pool->tp_lock);
		}
		mutex_exit(job->job_lock);
		if (__predict_false(rele_job))
			threadpool_job_rele(job);

		mutex_spin_enter(&pool->tp_lock);
	}
	threadpool_rele(pool);
	mutex_spin_exit(&pool->tp_lock);

	TP_LOG(("%s: exiting.\n", __func__));

	kthread_exit(0);
}

/* Thread pool thread */

static void __dead
threadpool_thread(void *arg)
{
	struct threadpool_thread *const thread = arg;
	struct threadpool *const pool = thread->tpt_pool;

	KASSERT((pool->tp_cpu == NULL) || (pool->tp_cpu == curcpu()));

	/* Wait until we're initialized and on the queue. */
	mutex_spin_enter(&pool->tp_lock);
	while (thread->tpt_lwp == NULL)
		cv_wait(&thread->tpt_cv, &pool->tp_lock);

	TP_LOG(("%s: starting.\n", __func__));

	KASSERT(thread->tpt_lwp == curlwp);
	for (;;) {
		/* Wait until we are assigned a job. */
		while (thread->tpt_job == NULL) {
			if (ISSET(pool->tp_flags, THREADPOOL_DYING)) {
				TP_LOG(("%s: THREADPOOL_DYING\n",
				    __func__));
				break;
			}
			if (cv_timedwait(&thread->tpt_cv, &pool->tp_lock,
				THREADPOOL_IDLE_TICKS))
				break;
		}
		if (__predict_false(thread->tpt_job == NULL)) {
			TAILQ_REMOVE(&pool->tp_idle_threads, thread,
			    tpt_entry);
			break;
		}

		struct threadpool_job *const job = thread->tpt_job;
		KASSERT(job != NULL);
		mutex_spin_exit(&pool->tp_lock);

		TP_LOG(("%s: running job '%s' on thread %p.\n",
		    __func__, job->job_name, thread));

		/* Set our lwp name to reflect what job we're doing. */
		lwp_lock(curlwp);
		char *const lwp_name = curlwp->l_name;
		curlwp->l_name = job->job_name;
		lwp_unlock(curlwp);

		/* Run the job. */
		(*job->job_fn)(job);

		/* Restore our lwp name. */
		lwp_lock(curlwp);
		curlwp->l_name = lwp_name;
		lwp_unlock(curlwp);

		/* Job is done and its name is unreferenced.  Release it. */
		threadpool_job_rele(job);

		mutex_spin_enter(&pool->tp_lock);
		KASSERT(thread->tpt_job == job);
		thread->tpt_job = NULL;
		TAILQ_INSERT_TAIL(&pool->tp_idle_threads, thread, tpt_entry);
	}
	threadpool_rele(pool);
	mutex_spin_exit(&pool->tp_lock);

	TP_LOG(("%s: thread %p exiting.\n", __func__, thread));

	KASSERT(!cv_has_waiters(&thread->tpt_cv));
	cv_destroy(&thread->tpt_cv);
	pool_cache_put(threadpool_thread_pc, thread);
	kthread_exit(0);
}