/*	$NetBSD: kern_threadpool.c,v 1.10 2018/12/26 21:43:39 thorpej Exp $	*/

/*-
 * Copyright (c) 2014, 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell and Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Thread pools.
 *
 * A thread pool is a collection of worker threads idle or running
 * jobs, together with an overseer thread that does not run jobs but
 * can be given jobs to assign to a worker thread.  Scheduling a job in
 * a thread pool does not allocate or even sleep at all, except perhaps
 * on an adaptive lock, unlike kthread_create.  Jobs reuse threads, so
 * they do not incur the expense of creating and destroying kthreads
 * unless there is not much work to be done.
 *
 * A per-CPU thread pool (threadpool_percpu) is a collection of thread
 * pools, one per CPU bound to that CPU.  For each priority level in
 * use, there is one shared unbound thread pool (i.e., pool of threads
 * not bound to any CPU) and one shared per-CPU thread pool.
 *
 * To use the unbound thread pool at priority pri, call
 * threadpool_get(&pool, pri).  When you're done, call
 * threadpool_put(pool, pri).
 *
 * To use the per-CPU thread pools at priority pri, call
 * threadpool_percpu_get(&pool_percpu, pri), and then use the thread
 * pool returned by threadpool_percpu_ref(pool_percpu) for the current
 * CPU, or by threadpool_percpu_ref_remote(pool_percpu, ci) for another
 * CPU.  When you're done, call threadpool_percpu_put(pool_percpu,
 * pri).
 *
 * +--MACHINE-----------------------------------------------+
 * | +--CPU 0-------+ +--CPU 1-------+     +--CPU n-------+ |
 * | | <overseer 0> | | <overseer 1> | ... | <overseer n> | |
 * | | <idle 0a>    | | <running 1a> | ... | <idle na>    | |
 * | | <running 0b> | | <running 1b> | ... | <idle nb>    | |
 * | |      .       | |      .       | ... |      .       | |
 * | |      .       | |      .       | ... |      .       | |
 * | |      .       | |      .       | ... |      .       | |
 * | +--------------+ +--------------+     +--------------+ |
 * |            +--unbound---------+                        |
 * |            | <overseer n+1>   |                        |
 * |            | <idle (n+1)a>    |                        |
 * |            | <running (n+1)b> |                        |
 * |            +------------------+                        |
 * +--------------------------------------------------------+
 *
 * XXX Why one overseer per CPU?  I did that originally to avoid
 * touching remote CPUs' memory when scheduling a job, but that still
 * requires interprocessor synchronization.  Perhaps we could get by
 * with a single overseer thread, at the expense of another pointer in
 * struct threadpool_job to identify the CPU on which it must run
 * in order for the overseer to schedule it correctly.
 */
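
/*
 * Example of the job lifecycle described above (an illustrative
 * sketch, not compiled code; "struct example_softc", its sc_* fields,
 * and example_job_fn are hypothetical names chosen for this example):
 *
 *	static void
 *	example_job_fn(struct threadpool_job *job)
 *	{
 *		struct example_softc *sc =
 *		    container_of(job, struct example_softc, sc_job);
 *
 *		... do the work ...
 *
 *		mutex_enter(&sc->sc_lock);
 *		threadpool_job_done(job);
 *		mutex_exit(&sc->sc_lock);
 *	}
 *
 *	error = threadpool_get(&sc->sc_pool, PRI_NONE);
 *	if (error)
 *		...
 *	threadpool_job_init(&sc->sc_job, example_job_fn, &sc->sc_lock,
 *	    "example");
 *
 *	mutex_enter(&sc->sc_lock);
 *	threadpool_schedule_job(sc->sc_pool, &sc->sc_job);
 *	mutex_exit(&sc->sc_lock);
 *
 *	... and at teardown time ...
 *
 *	mutex_enter(&sc->sc_lock);
 *	threadpool_cancel_job(sc->sc_pool, &sc->sc_job);
 *	mutex_exit(&sc->sc_lock);
 *	threadpool_job_destroy(&sc->sc_job);
 *	threadpool_put(sc->sc_pool, PRI_NONE);
 */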

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_threadpool.c,v 1.10 2018/12/26 21:43:39 thorpej Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/percpu.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/threadpool.h>

static ONCE_DECL(threadpool_init_once);

#define	THREADPOOL_INIT()					\
do {								\
	int threadpool_init_error __diagused =			\
	    RUN_ONCE(&threadpool_init_once, threadpools_init);	\
	KASSERT(threadpool_init_error == 0);			\
} while (/*CONSTCOND*/0)

/* Data structures */

TAILQ_HEAD(job_head, threadpool_job);
TAILQ_HEAD(thread_head, threadpool_thread);

struct threadpool_thread {
	struct lwp			*tpt_lwp;
	struct threadpool		*tpt_pool;
	struct threadpool_job		*tpt_job;
	kcondvar_t			tpt_cv;
	TAILQ_ENTRY(threadpool_thread)	tpt_entry;
};

struct threadpool {
	kmutex_t			tp_lock;
	struct threadpool_thread	tp_overseer;
	struct job_head			tp_jobs;
	struct thread_head		tp_idle_threads;
	uint64_t			tp_refcnt;
	int				tp_flags;
#define	THREADPOOL_DYING	0x01
	struct cpu_info			*tp_cpu;
	pri_t				tp_pri;
};

static void	threadpool_hold(struct threadpool *);
static void	threadpool_rele(struct threadpool *);

static int	threadpool_percpu_create(struct threadpool_percpu **, pri_t);
static void	threadpool_percpu_destroy(struct threadpool_percpu *);

static threadpool_job_fn_t threadpool_job_dead;

static int	threadpool_job_hold(struct threadpool_job *);
static void	threadpool_job_rele(struct threadpool_job *);

static void	threadpool_overseer_thread(void *) __dead;
static void	threadpool_thread(void *) __dead;

static pool_cache_t threadpool_thread_pc __read_mostly;

static kmutex_t threadpools_lock __cacheline_aligned;

/* Idle out threads after 30 seconds */
#define	THREADPOOL_IDLE_TICKS	mstohz(30 * 1000)

struct threadpool_unbound {
	struct threadpool		tpu_pool;

	/* protected by threadpools_lock */
	LIST_ENTRY(threadpool_unbound)	tpu_link;
	uint64_t			tpu_refcnt;
};

static LIST_HEAD(, threadpool_unbound) unbound_threadpools;

static struct threadpool_unbound *
threadpool_lookup_unbound(pri_t pri)
{
	struct threadpool_unbound *tpu;

	LIST_FOREACH(tpu, &unbound_threadpools, tpu_link) {
		if (tpu->tpu_pool.tp_pri == pri)
			return tpu;
	}
	return NULL;
}

static void
threadpool_insert_unbound(struct threadpool_unbound *tpu)
{
	KASSERT(threadpool_lookup_unbound(tpu->tpu_pool.tp_pri) == NULL);
	LIST_INSERT_HEAD(&unbound_threadpools, tpu, tpu_link);
}

static void
threadpool_remove_unbound(struct threadpool_unbound *tpu)
{
	KASSERT(threadpool_lookup_unbound(tpu->tpu_pool.tp_pri) == tpu);
	LIST_REMOVE(tpu, tpu_link);
}

struct threadpool_percpu {
	percpu_t *			tpp_percpu;
	pri_t				tpp_pri;

	/* protected by threadpools_lock */
	LIST_ENTRY(threadpool_percpu)	tpp_link;
	uint64_t			tpp_refcnt;
};

static LIST_HEAD(, threadpool_percpu) percpu_threadpools;

static struct threadpool_percpu *
threadpool_lookup_percpu(pri_t pri)
{
	struct threadpool_percpu *tpp;

	LIST_FOREACH(tpp, &percpu_threadpools, tpp_link) {
		if (tpp->tpp_pri == pri)
			return tpp;
	}
	return NULL;
}

static void
threadpool_insert_percpu(struct threadpool_percpu *tpp)
{
	KASSERT(threadpool_lookup_percpu(tpp->tpp_pri) == NULL);
	LIST_INSERT_HEAD(&percpu_threadpools, tpp, tpp_link);
}

static void
threadpool_remove_percpu(struct threadpool_percpu *tpp)
{
	KASSERT(threadpool_lookup_percpu(tpp->tpp_pri) == tpp);
	LIST_REMOVE(tpp, tpp_link);
}

#ifdef THREADPOOL_VERBOSE
#define	TP_LOG(x)		printf x
#else
#define	TP_LOG(x)		/* nothing */
#endif /* THREADPOOL_VERBOSE */

static int
threadpools_init(void)
{

	threadpool_thread_pc =
	    pool_cache_init(sizeof(struct threadpool_thread), 0, 0, 0,
	    "thplthrd", NULL, IPL_NONE, NULL, NULL, NULL);

	LIST_INIT(&unbound_threadpools);
	LIST_INIT(&percpu_threadpools);
	mutex_init(&threadpools_lock, MUTEX_DEFAULT, IPL_NONE);

	TP_LOG(("%s: sizeof(threadpool_job) = %zu\n",
	    __func__, sizeof(struct threadpool_job)));

	return 0;
}

/* Thread pool creation */

static bool
threadpool_pri_is_valid(pri_t pri)
{
	return (pri == PRI_NONE || (pri >= PRI_USER && pri < PRI_COUNT));
}

static int
threadpool_create(struct threadpool *const pool, struct cpu_info *ci,
    pri_t pri)
{
	struct lwp *lwp;
	int ktflags;
	int error;

	KASSERT(threadpool_pri_is_valid(pri));

	mutex_init(&pool->tp_lock, MUTEX_DEFAULT, IPL_VM);
	/* XXX overseer */
	TAILQ_INIT(&pool->tp_jobs);
	TAILQ_INIT(&pool->tp_idle_threads);
	pool->tp_refcnt = 1;		/* overseer's reference */
	pool->tp_flags = 0;
	pool->tp_cpu = ci;
	pool->tp_pri = pri;

	pool->tp_overseer.tpt_lwp = NULL;
	pool->tp_overseer.tpt_pool = pool;
	pool->tp_overseer.tpt_job = NULL;
	cv_init(&pool->tp_overseer.tpt_cv, "poolover");

	ktflags = 0;
	ktflags |= KTHREAD_MPSAFE;
	if (pri < PRI_KERNEL)
		ktflags |= KTHREAD_TS;
	error = kthread_create(pri, ktflags, ci, &threadpool_overseer_thread,
	    &pool->tp_overseer, &lwp,
	    "pooloverseer/%d@%d", (ci ? cpu_index(ci) : -1), (int)pri);
	if (error)
		goto fail0;

	mutex_spin_enter(&pool->tp_lock);
	pool->tp_overseer.tpt_lwp = lwp;
	cv_broadcast(&pool->tp_overseer.tpt_cv);
	mutex_spin_exit(&pool->tp_lock);

	return 0;

fail0:	KASSERT(error);
	KASSERT(pool->tp_overseer.tpt_job == NULL);
	KASSERT(pool->tp_overseer.tpt_pool == pool);
	KASSERT(pool->tp_flags == 0);
	KASSERT(pool->tp_refcnt == 0);
	KASSERT(TAILQ_EMPTY(&pool->tp_idle_threads));
	KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
	KASSERT(!cv_has_waiters(&pool->tp_overseer.tpt_cv));
	cv_destroy(&pool->tp_overseer.tpt_cv);
	mutex_destroy(&pool->tp_lock);
	return error;
}

/* Thread pool destruction */

static void
threadpool_destroy(struct threadpool *pool)
{
	struct threadpool_thread *thread;

	/* Mark the pool dying and wait for threads to commit suicide. */
	mutex_spin_enter(&pool->tp_lock);
	KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
	pool->tp_flags |= THREADPOOL_DYING;
	cv_broadcast(&pool->tp_overseer.tpt_cv);
	TAILQ_FOREACH(thread, &pool->tp_idle_threads, tpt_entry)
		cv_broadcast(&thread->tpt_cv);
	while (0 < pool->tp_refcnt) {
		TP_LOG(("%s: draining %" PRIu64 " references...\n", __func__,
		    pool->tp_refcnt));
		cv_wait(&pool->tp_overseer.tpt_cv, &pool->tp_lock);
	}
	mutex_spin_exit(&pool->tp_lock);

	KASSERT(pool->tp_overseer.tpt_job == NULL);
	KASSERT(pool->tp_overseer.tpt_pool == pool);
	KASSERT(pool->tp_flags == THREADPOOL_DYING);
	KASSERT(pool->tp_refcnt == 0);
	KASSERT(TAILQ_EMPTY(&pool->tp_idle_threads));
	KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
	KASSERT(!cv_has_waiters(&pool->tp_overseer.tpt_cv));
	cv_destroy(&pool->tp_overseer.tpt_cv);
	mutex_destroy(&pool->tp_lock);
}

static void
threadpool_hold(struct threadpool *pool)
{

	KASSERT(mutex_owned(&pool->tp_lock));
	pool->tp_refcnt++;
	KASSERT(pool->tp_refcnt != 0);
}

static void
threadpool_rele(struct threadpool *pool)
{

	KASSERT(mutex_owned(&pool->tp_lock));
	KASSERT(0 < pool->tp_refcnt);
	if (--pool->tp_refcnt == 0)
		cv_broadcast(&pool->tp_overseer.tpt_cv);
}

/* Unbound thread pools */

int
threadpool_get(struct threadpool **poolp, pri_t pri)
{
	struct threadpool_unbound *tpu, *tmp = NULL;
	int error;

	THREADPOOL_INIT();

	ASSERT_SLEEPABLE();

	if (! threadpool_pri_is_valid(pri))
		return EINVAL;

	mutex_enter(&threadpools_lock);
	tpu = threadpool_lookup_unbound(pri);
	if (tpu == NULL) {
		mutex_exit(&threadpools_lock);
		TP_LOG(("%s: No pool for pri=%d, creating one.\n",
		    __func__, (int)pri));
		tmp = kmem_zalloc(sizeof(*tmp), KM_SLEEP);
		error = threadpool_create(&tmp->tpu_pool, NULL, pri);
		if (error) {
			kmem_free(tmp, sizeof(*tmp));
			return error;
		}
		mutex_enter(&threadpools_lock);
		tpu = threadpool_lookup_unbound(pri);
		if (tpu == NULL) {
			TP_LOG(("%s: Won the creation race for pri=%d.\n",
			    __func__, (int)pri));
			tpu = tmp;
			tmp = NULL;
			threadpool_insert_unbound(tpu);
		}
	}
	KASSERT(tpu != NULL);
	tpu->tpu_refcnt++;
	KASSERT(tpu->tpu_refcnt != 0);
	mutex_exit(&threadpools_lock);

	if (tmp != NULL) {
		threadpool_destroy(&tmp->tpu_pool);
		kmem_free(tmp, sizeof(*tmp));
	}
	KASSERT(tpu != NULL);
	*poolp = &tpu->tpu_pool;
	return 0;
}

void
threadpool_put(struct threadpool *pool, pri_t pri)
{
	struct threadpool_unbound *tpu =
	    container_of(pool, struct threadpool_unbound, tpu_pool);

	THREADPOOL_INIT();

	ASSERT_SLEEPABLE();

	KASSERT(threadpool_pri_is_valid(pri));

	mutex_enter(&threadpools_lock);
	KASSERT(tpu == threadpool_lookup_unbound(pri));
	KASSERT(0 < tpu->tpu_refcnt);
	if (--tpu->tpu_refcnt == 0) {
		TP_LOG(("%s: Last reference for pri=%d, destroying pool.\n",
		    __func__, (int)pri));
		threadpool_remove_unbound(tpu);
	} else {
		tpu = NULL;
	}
	mutex_exit(&threadpools_lock);

	if (tpu) {
		threadpool_destroy(&tpu->tpu_pool);
		kmem_free(tpu, sizeof(*tpu));
	}
}

/* Per-CPU thread pools */

int
threadpool_percpu_get(struct threadpool_percpu **pool_percpup, pri_t pri)
{
	struct threadpool_percpu *pool_percpu, *tmp = NULL;
	int error;

	THREADPOOL_INIT();

	ASSERT_SLEEPABLE();

	if (! threadpool_pri_is_valid(pri))
		return EINVAL;

	mutex_enter(&threadpools_lock);
	pool_percpu = threadpool_lookup_percpu(pri);
	if (pool_percpu == NULL) {
		mutex_exit(&threadpools_lock);
		TP_LOG(("%s: No pool for pri=%d, creating one.\n",
		    __func__, (int)pri));
		error = threadpool_percpu_create(&tmp, pri);
		if (error)
			return error;
		KASSERT(tmp != NULL);
		mutex_enter(&threadpools_lock);
		pool_percpu = threadpool_lookup_percpu(pri);
		if (pool_percpu == NULL) {
			TP_LOG(("%s: Won the creation race for pri=%d.\n",
			    __func__, (int)pri));
			pool_percpu = tmp;
			tmp = NULL;
			threadpool_insert_percpu(pool_percpu);
		}
	}
	KASSERT(pool_percpu != NULL);
	pool_percpu->tpp_refcnt++;
	KASSERT(pool_percpu->tpp_refcnt != 0);
	mutex_exit(&threadpools_lock);

	if (tmp != NULL)
		threadpool_percpu_destroy(tmp);
	KASSERT(pool_percpu != NULL);
	*pool_percpup = pool_percpu;
	return 0;
}

void
threadpool_percpu_put(struct threadpool_percpu *pool_percpu, pri_t pri)
{

	THREADPOOL_INIT();

	ASSERT_SLEEPABLE();

	KASSERT(threadpool_pri_is_valid(pri));

	mutex_enter(&threadpools_lock);
	KASSERT(pool_percpu == threadpool_lookup_percpu(pri));
	KASSERT(0 < pool_percpu->tpp_refcnt);
	if (--pool_percpu->tpp_refcnt == 0) {
		TP_LOG(("%s: Last reference for pri=%d, destroying pool.\n",
		    __func__, (int)pri));
		threadpool_remove_percpu(pool_percpu);
	} else {
		pool_percpu = NULL;
	}
	mutex_exit(&threadpools_lock);

	if (pool_percpu)
		threadpool_percpu_destroy(pool_percpu);
}

struct threadpool *
threadpool_percpu_ref(struct threadpool_percpu *pool_percpu)
{
	struct threadpool **poolp, *pool;

	poolp = percpu_getref(pool_percpu->tpp_percpu);
	pool = *poolp;
	percpu_putref(pool_percpu->tpp_percpu);

	return pool;
}

struct threadpool *
threadpool_percpu_ref_remote(struct threadpool_percpu *pool_percpu,
    struct cpu_info *ci)
{
	struct threadpool **poolp, *pool;

	percpu_traverse_enter();
	poolp = percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
	pool = *poolp;
	percpu_traverse_exit();

	return pool;
}
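
/*
 * Example: scheduling a job on the current CPU's member of a per-CPU
 * pool, per the usage pattern in the comment at the top of this file
 * (an illustrative sketch, not compiled code; "sc" and its sc_*
 * fields are hypothetical names chosen for this example):
 *
 *	struct threadpool *pool;
 *
 *	error = threadpool_percpu_get(&sc->sc_pool_percpu, PRI_NONE);
 *	if (error)
 *		...
 *
 *	pool = threadpool_percpu_ref(sc->sc_pool_percpu);
 *	mutex_enter(&sc->sc_lock);
 *	threadpool_schedule_job(pool, &sc->sc_job);
 *	mutex_exit(&sc->sc_lock);
 */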

static int
threadpool_percpu_create(struct threadpool_percpu **pool_percpup, pri_t pri)
{
	struct threadpool_percpu *pool_percpu;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	unsigned int i, j;
	int error;

	pool_percpu = kmem_zalloc(sizeof(*pool_percpu), KM_SLEEP);
	if (pool_percpu == NULL) {
		error = ENOMEM;
		goto fail0;
	}
	pool_percpu->tpp_pri = pri;

	pool_percpu->tpp_percpu = percpu_alloc(sizeof(struct threadpool *));
	if (pool_percpu->tpp_percpu == NULL) {
		error = ENOMEM;
		goto fail1;
	}

	for (i = 0, CPU_INFO_FOREACH(cii, ci), i++) {
		struct threadpool *pool;

		pool = kmem_zalloc(sizeof(*pool), KM_SLEEP);
		error = threadpool_create(pool, ci, pri);
		if (error) {
			kmem_free(pool, sizeof(*pool));
			goto fail2;
		}
		percpu_traverse_enter();
		struct threadpool **const poolp =
		    percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
		*poolp = pool;
		percpu_traverse_exit();
	}

	/* Success! */
	*pool_percpup = (struct threadpool_percpu *)pool_percpu;
	return 0;

fail2:	for (j = 0, CPU_INFO_FOREACH(cii, ci), j++) {
		if (i <= j)
			break;
		percpu_traverse_enter();
		struct threadpool **const poolp =
		    percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
		struct threadpool *const pool = *poolp;
		percpu_traverse_exit();
		threadpool_destroy(pool);
		kmem_free(pool, sizeof(*pool));
	}
	percpu_free(pool_percpu->tpp_percpu, sizeof(struct threadpool *));
fail1:	kmem_free(pool_percpu, sizeof(*pool_percpu));
fail0:	return error;
}

static void
threadpool_percpu_destroy(struct threadpool_percpu *pool_percpu)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	for (CPU_INFO_FOREACH(cii, ci)) {
		percpu_traverse_enter();
		struct threadpool **const poolp =
		    percpu_getptr_remote(pool_percpu->tpp_percpu, ci);
		struct threadpool *const pool = *poolp;
		percpu_traverse_exit();
		threadpool_destroy(pool);
		kmem_free(pool, sizeof(*pool));
	}

	percpu_free(pool_percpu->tpp_percpu, sizeof(struct threadpool *));
	kmem_free(pool_percpu, sizeof(*pool_percpu));
}

/* Thread pool jobs */

void __printflike(4,5)
threadpool_job_init(struct threadpool_job *job, threadpool_job_fn_t fn,
    kmutex_t *lock, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	(void)vsnprintf(job->job_name, sizeof(job->job_name), fmt, ap);
	va_end(ap);

	job->job_lock = lock;
	job->job_thread = NULL;
	job->job_refcnt = 0;
	cv_init(&job->job_cv, job->job_name);
	job->job_fn = fn;
}

static void
threadpool_job_dead(struct threadpool_job *job)
{

	panic("threadpool job %p ran after destruction", job);
}

void
threadpool_job_destroy(struct threadpool_job *job)
{

	ASSERT_SLEEPABLE();

	KASSERTMSG((job->job_thread == NULL), "job %p still running", job);

	mutex_enter(job->job_lock);
	while (0 < job->job_refcnt)
		cv_wait(&job->job_cv, job->job_lock);
	mutex_exit(job->job_lock);

	job->job_lock = NULL;
	KASSERT(job->job_thread == NULL);
	KASSERT(job->job_refcnt == 0);
	KASSERT(!cv_has_waiters(&job->job_cv));
	cv_destroy(&job->job_cv);
	job->job_fn = threadpool_job_dead;
	(void)strlcpy(job->job_name, "deadjob", sizeof(job->job_name));
}

static int
threadpool_job_hold(struct threadpool_job *job)
{
	unsigned int refcnt;

	do {
		refcnt = job->job_refcnt;
		if (refcnt == UINT_MAX)
			return EBUSY;
	} while (atomic_cas_uint(&job->job_refcnt, refcnt, (refcnt + 1))
	    != refcnt);

	return 0;
}

static void
threadpool_job_rele(struct threadpool_job *job)
{
	unsigned int refcnt;

	do {
		refcnt = job->job_refcnt;
		KASSERT(0 < refcnt);
		if (refcnt == 1) {
			/*
			 * Dropping the last reference: take the job
			 * lock so the cv_broadcast cannot be missed by
			 * threadpool_job_destroy, which waits for the
			 * reference count to drain under the same lock.
			 */
			mutex_enter(job->job_lock);
			refcnt = atomic_dec_uint_nv(&job->job_refcnt);
			KASSERT(refcnt != UINT_MAX);
			if (refcnt == 0)
				cv_broadcast(&job->job_cv);
			mutex_exit(job->job_lock);
			return;
		}
	} while (atomic_cas_uint(&job->job_refcnt, refcnt, (refcnt - 1))
	    != refcnt);
}

void
threadpool_job_done(struct threadpool_job *job)
{

	KASSERT(mutex_owned(job->job_lock));
	KASSERT(job->job_thread != NULL);
	KASSERT(job->job_thread->tpt_lwp == curlwp);

	cv_broadcast(&job->job_cv);
	job->job_thread = NULL;
}

void
threadpool_schedule_job(struct threadpool *pool, struct threadpool_job *job)
{

	KASSERT(mutex_owned(job->job_lock));

	/*
	 * If the job's already running, let it keep running.  The job
	 * is guaranteed by the interlock not to end early -- if it had
	 * ended early, threadpool_job_done would have set job_thread
	 * to NULL under the interlock.
	 */
	if (__predict_true(job->job_thread != NULL)) {
		TP_LOG(("%s: job '%s' already running.\n",
		    __func__, job->job_name));
		return;
	}

	/* Otherwise, try to assign a thread to the job. */
	mutex_spin_enter(&pool->tp_lock);
	if (__predict_false(TAILQ_EMPTY(&pool->tp_idle_threads))) {
		/* Nobody's idle.  Give it to the overseer. */
		TP_LOG(("%s: giving job '%s' to overseer.\n",
		    __func__, job->job_name));
		job->job_thread = &pool->tp_overseer;
		TAILQ_INSERT_TAIL(&pool->tp_jobs, job, job_entry);
	} else {
		/* Assign it to the first idle thread. */
		job->job_thread = TAILQ_FIRST(&pool->tp_idle_threads);
		TP_LOG(("%s: giving job '%s' to idle thread %p.\n",
		    __func__, job->job_name, job->job_thread));
		TAILQ_REMOVE(&pool->tp_idle_threads, job->job_thread,
		    tpt_entry);
		threadpool_job_hold(job);
		job->job_thread->tpt_job = job;
	}

	/* Notify whomever we gave it to, overseer or idle thread. */
	KASSERT(job->job_thread != NULL);
	cv_broadcast(&job->job_thread->tpt_cv);
	mutex_spin_exit(&pool->tp_lock);
}

bool
threadpool_cancel_job_async(struct threadpool *pool, struct threadpool_job *job)
{

	KASSERT(mutex_owned(job->job_lock));

	/*
	 * XXXJRT This fails (albeit safely) when all of the following
	 * are true:
	 *
	 *	=> "pool" is something other than what the job was
	 *	   scheduled on.  This can legitimately occur if,
	 *	   for example, a job is percpu-scheduled on CPU0
	 *	   and then CPU1 attempts to cancel it without taking
	 *	   a remote pool reference.  (this might happen by
	 *	   "luck of the draw").
	 *
	 *	=> "job" is not yet running, but is assigned to the
	 *	   overseer.
	 *
	 * When this happens, this code makes the determination that
	 * the job is already running.  The failure mode is that the
	 * caller is told the job is running, and thus has to wait.
	 * The overseer will eventually get to it and the job will
	 * proceed as if it had been already running.
	 */

	if (job->job_thread == NULL) {
		/* Nothing to do.  Guaranteed not running. */
		return true;
	} else if (job->job_thread == &pool->tp_overseer) {
		/* Take it off the list to guarantee it won't run. */
		job->job_thread = NULL;
		mutex_spin_enter(&pool->tp_lock);
		TAILQ_REMOVE(&pool->tp_jobs, job, job_entry);
		mutex_spin_exit(&pool->tp_lock);
		return true;
	} else {
		/* Too late -- already running. */
		return false;
	}
}

void
threadpool_cancel_job(struct threadpool *pool, struct threadpool_job *job)
{

	ASSERT_SLEEPABLE();

	KASSERT(mutex_owned(job->job_lock));

	if (threadpool_cancel_job_async(pool, job))
		return;

	/* Already running.  Wait for it to complete. */
	while (job->job_thread != NULL)
		cv_wait(&job->job_cv, job->job_lock);
}

/* Thread pool overseer thread */

static void __dead
threadpool_overseer_thread(void *arg)
{
	struct threadpool_thread *const overseer = arg;
	struct threadpool *const pool = overseer->tpt_pool;
	struct lwp *lwp = NULL;
	int ktflags;
	int error;

	KASSERT((pool->tp_cpu == NULL) || (pool->tp_cpu == curcpu()));

	/* Wait until we're initialized. */
	mutex_spin_enter(&pool->tp_lock);
	while (overseer->tpt_lwp == NULL)
		cv_wait(&overseer->tpt_cv, &pool->tp_lock);

	TP_LOG(("%s: starting.\n", __func__));

	for (;;) {
		/* Wait until there's a job. */
		while (TAILQ_EMPTY(&pool->tp_jobs)) {
			if (ISSET(pool->tp_flags, THREADPOOL_DYING)) {
				TP_LOG(("%s: THREADPOOL_DYING\n",
				    __func__));
				break;
			}
			cv_wait(&overseer->tpt_cv, &pool->tp_lock);
		}
		if (__predict_false(TAILQ_EMPTY(&pool->tp_jobs)))
			break;

		/* If there are no threads, we'll have to try to start one. */
		if (TAILQ_EMPTY(&pool->tp_idle_threads)) {
			TP_LOG(("%s: Got a job, need to create a thread.\n",
			    __func__));
			threadpool_hold(pool);
			mutex_spin_exit(&pool->tp_lock);

			struct threadpool_thread *const thread =
			    pool_cache_get(threadpool_thread_pc, PR_WAITOK);
			thread->tpt_lwp = NULL;
			thread->tpt_pool = pool;
			thread->tpt_job = NULL;
			cv_init(&thread->tpt_cv, "poolthrd");

			ktflags = 0;
			ktflags |= KTHREAD_MPSAFE;
			if (pool->tp_pri < PRI_KERNEL)
				ktflags |= KTHREAD_TS;
			error = kthread_create(pool->tp_pri, ktflags,
			    pool->tp_cpu, &threadpool_thread, thread, &lwp,
			    "poolthread/%d@%d",
			    (pool->tp_cpu ? cpu_index(pool->tp_cpu) : -1),
			    (int)pool->tp_pri);

			mutex_spin_enter(&pool->tp_lock);
			if (error) {
				pool_cache_put(threadpool_thread_pc, thread);
				threadpool_rele(pool);
				/* XXX What to do to wait for memory? */
				(void)kpause("thrdplcr", false, hz,
				    &pool->tp_lock);
				continue;
			}
			/*
			 * New kthread now owns the reference to the pool
			 * taken above.
			 */
			KASSERT(lwp != NULL);
			TAILQ_INSERT_TAIL(&pool->tp_idle_threads, thread,
			    tpt_entry);
			thread->tpt_lwp = lwp;
			lwp = NULL;
			cv_broadcast(&thread->tpt_cv);
			continue;
		}

		/* There are idle threads, so try giving one a job. */
		bool rele_job = true;
		struct threadpool_job *const job = TAILQ_FIRST(&pool->tp_jobs);
		TAILQ_REMOVE(&pool->tp_jobs, job, job_entry);
		error = threadpool_job_hold(job);
		if (error) {
			TAILQ_INSERT_HEAD(&pool->tp_jobs, job, job_entry);
			(void)kpause("pooljob", false, hz, &pool->tp_lock);
			continue;
		}
		mutex_spin_exit(&pool->tp_lock);

		mutex_enter(job->job_lock);
		/* If the job was cancelled, we'll no longer be its thread. */
		if (__predict_true(job->job_thread == overseer)) {
			mutex_spin_enter(&pool->tp_lock);
			if (__predict_false(
				    TAILQ_EMPTY(&pool->tp_idle_threads))) {
				/*
				 * Someone else snagged the thread
				 * first.  We'll have to try again.
				 */
				TP_LOG(("%s: '%s' lost race to use idle thread.\n",
				    __func__, job->job_name));
				TAILQ_INSERT_HEAD(&pool->tp_jobs, job,
				    job_entry);
			} else {
				/*
				 * Assign the job to the thread and
				 * wake the thread so it starts work.
				 */
				struct threadpool_thread *const thread =
				    TAILQ_FIRST(&pool->tp_idle_threads);

				TP_LOG(("%s: '%s' gets thread %p\n",
				    __func__, job->job_name, thread));
				KASSERT(thread->tpt_job == NULL);
				TAILQ_REMOVE(&pool->tp_idle_threads, thread,
				    tpt_entry);
				thread->tpt_job = job;
				job->job_thread = thread;
				cv_broadcast(&thread->tpt_cv);
				/* Gave the thread our job reference. */
				rele_job = false;
			}
			mutex_spin_exit(&pool->tp_lock);
		}
		mutex_exit(job->job_lock);
		if (__predict_false(rele_job))
			threadpool_job_rele(job);

		mutex_spin_enter(&pool->tp_lock);
	}
	threadpool_rele(pool);
	mutex_spin_exit(&pool->tp_lock);

	TP_LOG(("%s: exiting.\n", __func__));

	kthread_exit(0);
}

/* Thread pool thread */

static void __dead
threadpool_thread(void *arg)
{
	struct threadpool_thread *const thread = arg;
	struct threadpool *const pool = thread->tpt_pool;

	KASSERT((pool->tp_cpu == NULL) || (pool->tp_cpu == curcpu()));

	/* Wait until we're initialized and on the queue. */
	mutex_spin_enter(&pool->tp_lock);
	while (thread->tpt_lwp == NULL)
		cv_wait(&thread->tpt_cv, &pool->tp_lock);

	TP_LOG(("%s: starting.\n", __func__));

	KASSERT(thread->tpt_lwp == curlwp);
	for (;;) {
		/* Wait until we are assigned a job. */
		while (thread->tpt_job == NULL) {
			if (ISSET(pool->tp_flags, THREADPOOL_DYING)) {
				TP_LOG(("%s: THREADPOOL_DYING\n",
				    __func__));
				break;
			}
			if (cv_timedwait(&thread->tpt_cv, &pool->tp_lock,
				THREADPOOL_IDLE_TICKS))
				break;
		}
		if (__predict_false(thread->tpt_job == NULL)) {
			TAILQ_REMOVE(&pool->tp_idle_threads, thread,
			    tpt_entry);
			break;
		}

		struct threadpool_job *const job = thread->tpt_job;
		KASSERT(job != NULL);
		mutex_spin_exit(&pool->tp_lock);

		TP_LOG(("%s: running job '%s' on thread %p.\n",
		    __func__, job->job_name, thread));

		/* Set our lwp name to reflect what job we're doing. */
		lwp_lock(curlwp);
		char *const lwp_name = curlwp->l_name;
		curlwp->l_name = job->job_name;
		lwp_unlock(curlwp);

		/* Run the job. */
		(*job->job_fn)(job);

		/* Restore our lwp name. */
		lwp_lock(curlwp);
		curlwp->l_name = lwp_name;
		lwp_unlock(curlwp);

		/* Job is done and its name is unreferenced.  Release it. */
		threadpool_job_rele(job);

		mutex_spin_enter(&pool->tp_lock);
		KASSERT(thread->tpt_job == job);
		thread->tpt_job = NULL;
		TAILQ_INSERT_TAIL(&pool->tp_idle_threads, thread, tpt_entry);
	}
	threadpool_rele(pool);
	mutex_spin_exit(&pool->tp_lock);

	TP_LOG(("%s: thread %p exiting.\n", __func__, thread));

	KASSERT(!cv_has_waiters(&thread->tpt_cv));
	cv_destroy(&thread->tpt_cv);
	pool_cache_put(threadpool_thread_pc, thread);
	kthread_exit(0);
}