Lines Matching defs:pool
35 * A thread pool is a collection of worker threads idle or running
38 * a thread pool does not allocate or even sleep at all, except perhaps
43 * A per-CPU thread pool (threadpool_percpu) is a collection of thread
45 * use, there is one shared unbound thread pool (i.e., pool of threads
46 * not bound to any CPU) and one shared per-CPU thread pool.
48 * To use the unbound thread pool at priority pri, call
49 * threadpool_get(&pool, pri). When you're done, call
50 * threadpool_put(pool, pri).
54 * pool returned by threadpool_percpu_ref(pool_percpu) for the current
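The comment fragments at 35-54 describe the two client-facing APIs: one shared unbound pool per priority, and one shared per-CPU pool per priority. A minimal usage sketch, assuming the declarations in sys/threadpool.h and a sleepable caller inside a function returning int (error handling abbreviated):

    #include <sys/threadpool.h>

    struct threadpool *pool;
    struct threadpool_percpu *pool_percpu;
    int error;

    /* Unbound pool: threads not bound to any CPU. */
    error = threadpool_get(&pool, PRI_NONE);
    if (error)
            return error;
    /* ... schedule jobs on pool ... */
    threadpool_put(pool, PRI_NONE);

    /* Per-CPU pool: reference the current CPU's member pool. */
    error = threadpool_percpu_get(&pool_percpu, PRI_NONE);
    if (error)
            return error;
    pool = threadpool_percpu_ref(pool_percpu);
    /* ... schedule jobs on pool; they run bound to this CPU ... */
    threadpool_percpu_put(pool_percpu, PRI_NONE);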
97 #include <sys/pool.h>
114 "struct threadpool *"/*pool*/, "pri_t"/*pri*/);
116 "struct threadpool *"/*pool*/, "pri_t"/*pri*/);
125 "struct threadpool *"/*pool*/, "pri_t"/*pri*/);
127 "struct threadpool *"/*pool*/, "pri_t"/*pri*/);
132 "struct cpu_info *"/*ci*/, "pri_t"/*pri*/, "struct threadpool *"/*pool*/);
136 "struct threadpool *"/*pool*/);
138 "struct threadpool *"/*pool*/, "uint64_t"/*refcnt*/);
141 "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/);
143 "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/);
145 "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/);
147 "struct threadpool *"/*pool*/,
152 "struct threadpool *"/*pool*/);
154 "struct threadpool *"/*pool*/);
156 "struct threadpool *"/*pool*/);
158 "struct threadpool *"/*pool*/,
161 "struct threadpool *"/*pool*/,
165 "struct threadpool *"/*pool*/);
168 "struct threadpool *"/*pool*/);
170 "struct threadpool *"/*pool*/);
172 "struct threadpool *"/*pool*/, "struct threadpool_job *"/*job*/);
174 "struct threadpool *"/*pool*/);
223 /* Default to a 30-second idle timeout for pool threads. */
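The comment at 223 precedes the idle-timeout setting itself, which the match does not capture. One plausible shape, with the identifier name and units an assumption rather than anything visible in this listing:

    /* Assumed name and units; only the comment above is in the match. */
    static int threadpool_idle_time_ms = 30 * 1000;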
371 /* Thread pool creation */
380 threadpool_create(struct threadpool *const pool, struct cpu_info *ci,
392 mutex_init(&pool->tp_lock, MUTEX_DEFAULT, IPL_VM);
394 TAILQ_INIT(&pool->tp_jobs);
395 TAILQ_INIT(&pool->tp_idle_threads);
396 pool->tp_refcnt = 1; /* dispatcher's reference */
397 pool->tp_flags = 0;
398 pool->tp_cpu = ci;
399 pool->tp_pri = pri;
401 pool->tp_dispatcher.tpt_lwp = NULL;
402 pool->tp_dispatcher.tpt_pool = pool;
403 pool->tp_dispatcher.tpt_job = NULL;
404 cv_init(&pool->tp_dispatcher.tpt_cv, "pooldisp");
412 &pool->tp_dispatcher, &lwp, "pooldisp%s", suffix);
416 mutex_spin_enter(&pool->tp_lock);
417 pool->tp_dispatcher.tpt_lwp = lwp;
418 cv_broadcast(&pool->tp_dispatcher.tpt_cv);
419 mutex_spin_exit(&pool->tp_lock);
421 SDT_PROBE3(sdt, kernel, threadpool, create__success, ci, pri, pool);
425 KASSERT(pool->tp_dispatcher.tpt_job == NULL);
426 KASSERT(pool->tp_dispatcher.tpt_pool == pool);
427 KASSERT(pool->tp_flags == 0);
428 KASSERT(pool->tp_refcnt == 0);
429 KASSERT(TAILQ_EMPTY(&pool->tp_idle_threads));
430 KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
431 KASSERT(!cv_has_waiters(&pool->tp_dispatcher.tpt_cv));
432 cv_destroy(&pool->tp_dispatcher.tpt_cv);
433 mutex_destroy(&pool->tp_lock);
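Lines 416-418 are one half of a startup handshake: threadpool_create() publishes the dispatcher's LWP under tp_lock and broadcasts on the dispatcher's CV. The other half appears at 977-979, where the newly created dispatcher waits for that field to be filled in. Side by side, with the dispatcher's wait condition assumed (it is elided from the match):

    /* Creator (threadpool_create): */
    mutex_spin_enter(&pool->tp_lock);
    pool->tp_dispatcher.tpt_lwp = lwp;
    cv_broadcast(&pool->tp_dispatcher.tpt_cv);
    mutex_spin_exit(&pool->tp_lock);

    /* Dispatcher (threadpool_dispatcher), assumed wait condition: */
    mutex_spin_enter(&pool->tp_lock);
    while (dispatcher->tpt_lwp == NULL)
            cv_wait(&dispatcher->tpt_cv, &pool->tp_lock);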
438 /* Thread pool destruction */
441 threadpool_destroy(struct threadpool *pool)
445 SDT_PROBE1(sdt, kernel, threadpool, destroy, pool);
447 /* Mark the pool dying and wait for threads to commit suicide. */
448 mutex_spin_enter(&pool->tp_lock);
449 KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
450 pool->tp_flags |= THREADPOOL_DYING;
451 cv_broadcast(&pool->tp_dispatcher.tpt_cv);
452 TAILQ_FOREACH(thread, &pool->tp_idle_threads, tpt_entry)
454 while (0 < pool->tp_refcnt) {
456 pool, pool->tp_refcnt);
457 cv_wait(&pool->tp_dispatcher.tpt_cv, &pool->tp_lock);
459 mutex_spin_exit(&pool->tp_lock);
461 KASSERT(pool->tp_dispatcher.tpt_job == NULL);
462 KASSERT(pool->tp_dispatcher.tpt_pool == pool);
463 KASSERT(pool->tp_flags == THREADPOOL_DYING);
464 KASSERT(pool->tp_refcnt == 0);
465 KASSERT(TAILQ_EMPTY(&pool->tp_idle_threads));
466 KASSERT(TAILQ_EMPTY(&pool->tp_jobs));
467 KASSERT(!cv_has_waiters(&pool->tp_dispatcher.tpt_cv));
468 cv_destroy(&pool->tp_dispatcher.tpt_cv);
469 mutex_destroy(&pool->tp_lock);
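The destroy path at 447-469 is a shutdown protocol: mark THREADPOOL_DYING, wake the dispatcher and every idle worker, then sleep on the dispatcher's CV until tp_refcnt drains to zero (the final threadpool_rele() broadcasts it). The body of the TAILQ_FOREACH at 452 falls outside the match; presumably it is the per-thread wakeup:

    TAILQ_FOREACH(thread, &pool->tp_idle_threads, tpt_entry)
            cv_broadcast(&thread->tpt_cv);      /* assumed loop body */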
473 threadpool_hold(struct threadpool *pool)
476 KASSERT(mutex_owned(&pool->tp_lock));
477 pool->tp_refcnt++;
478 KASSERT(pool->tp_refcnt != 0);
482 threadpool_rele(struct threadpool *pool)
485 KASSERT(mutex_owned(&pool->tp_lock));
486 KASSERT(0 < pool->tp_refcnt);
487 if (--pool->tp_refcnt == 0)
488 cv_broadcast(&pool->tp_dispatcher.tpt_cv);
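threadpool_hold() and threadpool_rele() at 473-488 form a reference count protected by tp_lock, with the final release waking threadpool_destroy(). A hypothetical caller pins the pool across a lock drop like so:

    mutex_spin_enter(&pool->tp_lock);
    threadpool_hold(pool);          /* pool now cannot be destroyed */
    mutex_spin_exit(&pool->tp_lock);

    /* ... work that sleeps, or otherwise runs without tp_lock ... */

    mutex_spin_enter(&pool->tp_lock);
    threadpool_rele(pool);          /* may wake threadpool_destroy() */
    mutex_spin_exit(&pool->tp_lock);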
542 threadpool_put(struct threadpool *pool, pri_t pri)
545 container_of(pool, struct threadpool_unbound, tpu_pool);
550 SDT_PROBE2(sdt, kernel, threadpool, put, pool, pri);
556 SDT_PROBE2(sdt, kernel, threadpool, put__destroy, pool, pri);
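threadpool_put() at 542-556 recovers the containing threadpool_unbound via container_of() and, per the put__destroy probe, tears the pool down when the last user lets go. A sketch of the likely shape; the reference-count field and the locking around the global pool table are assumptions:

    struct threadpool_unbound *const tpu =
        container_of(pool, struct threadpool_unbound, tpu_pool);

    /* Assumed: drop one user reference under a global lock. */
    if (--tpu->tpu_refcnt == 0) {
            SDT_PROBE2(sdt, kernel, threadpool, put__destroy, pool, pri);
            threadpool_destroy(&tpu->tpu_pool);
            kmem_free(tpu, sizeof(*tpu));
    }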
645 struct threadpool **poolp, *pool;
648 pool = *poolp;
651 return pool;
658 struct threadpool **poolp, *pool;
668 pool = *poolp;
671 return pool;
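Lines 645-671 are two near-identical bodies, consistent with the threadpool_percpu_ref() and threadpool_percpu_ref_remote() functions named in the header comment. A sketch of the current-CPU variant, assuming the standard percpu(9) access pattern and a hypothetical tpp_percpu field:

    struct threadpool **poolp, *pool;

    poolp = percpu_getref(pool_percpu->tpp_percpu); /* assumed field */
    pool = *poolp;
    percpu_putref(pool_percpu->tpp_percpu);

    return pool;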
749 /* Thread pool jobs */
851 threadpool_schedule_job(struct threadpool *pool, struct threadpool_job *job)
856 SDT_PROBE2(sdt, kernel, threadpool, schedule__job, pool, job);
866 pool, job);
873 mutex_spin_enter(&pool->tp_lock);
874 if (__predict_false(TAILQ_EMPTY(&pool->tp_idle_threads))) {
877 pool, job);
878 job->job_thread = &pool->tp_dispatcher;
879 TAILQ_INSERT_TAIL(&pool->tp_jobs, job, job_entry);
882 job->job_thread = TAILQ_FIRST(&pool->tp_idle_threads);
884 pool, job, job->job_thread->tpt_lwp);
885 TAILQ_REMOVE(&pool->tp_idle_threads, job->job_thread,
893 mutex_spin_exit(&pool->tp_lock);
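Between the idle-thread removal at 885 and the unlock at 893, threadpool_schedule_job() presumably wakes whichever thread now owns the job, dispatcher or worker alike: cv_broadcast(&job->job_thread->tpt_cv);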
897 threadpool_cancel_job_async(struct threadpool *pool, struct threadpool_job *job)
906 * => "pool" is something other than what the job was
910 * a remote pool reference. (this might happen by
926 } else if (job->job_thread == &pool->tp_dispatcher) {
929 mutex_spin_enter(&pool->tp_lock);
930 TAILQ_REMOVE(&pool->tp_jobs, job, job_entry);
931 mutex_spin_exit(&pool->tp_lock);
941 threadpool_cancel_job(struct threadpool *pool, struct threadpool_job *job)
953 if (threadpool_cancel_job_async(pool, job))
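The scheduling logic above hands a job either to an idle worker or, when none is idle (the __predict_false branch at 874), to the dispatcher's tp_jobs queue; cancellation either unlinks a still-queued job (926-931) or waits for the running one to finish. From the client side, the job lifecycle is roughly the following sketch; my_job, my_lock, and my_job_fn are hypothetical names, and the job function must call threadpool_job_done() before returning:

    static kmutex_t my_lock;                /* hypothetical interlock */
    static struct threadpool_job my_job;

    static void
    my_job_fn(struct threadpool_job *job)
    {

            /* ... perform the deferred work ... */

            mutex_enter(&my_lock);
            threadpool_job_done(job);       /* required before returning */
            mutex_exit(&my_lock);
    }

    /* Setup, scheduling, teardown (job lock held where required): */
    threadpool_job_init(&my_job, my_job_fn, &my_lock, "myjob");

    mutex_enter(&my_lock);
    threadpool_schedule_job(pool, &my_job); /* returns early if running */
    mutex_exit(&my_lock);

    mutex_enter(&my_lock);
    threadpool_cancel_job(pool, &my_job);   /* may sleep until it ends */
    mutex_exit(&my_lock);
    threadpool_job_destroy(&my_job);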
961 /* Thread pool dispatcher thread */
967 struct threadpool *const pool = dispatcher->tpt_pool;
973 KASSERT((pool->tp_cpu == NULL) || (pool->tp_cpu == curcpu()));
974 KASSERT((pool->tp_cpu == NULL) || (curlwp->l_pflag & LP_BOUND));
977 mutex_spin_enter(&pool->tp_lock);
979 cv_wait(&dispatcher->tpt_cv, &pool->tp_lock);
981 SDT_PROBE1(sdt, kernel, threadpool, dispatcher__start, pool);
985 while (TAILQ_EMPTY(&pool->tp_jobs)) {
986 if (ISSET(pool->tp_flags, THREADPOOL_DYING)) {
988 dispatcher__dying, pool);
991 cv_wait(&dispatcher->tpt_cv, &pool->tp_lock);
993 if (__predict_false(TAILQ_EMPTY(&pool->tp_jobs)))
997 if (TAILQ_EMPTY(&pool->tp_idle_threads)) {
999 pool);
1000 threadpool_hold(pool);
1001 mutex_spin_exit(&pool->tp_lock);
1006 thread->tpt_pool = pool;
1012 if (pool->tp_pri < PRI_KERNEL)
1014 threadnamesuffix(suffix, sizeof(suffix), pool->tp_cpu,
1015 pool->tp_pri);
1016 error = kthread_create(pool->tp_pri, ktflags,
1017 pool->tp_cpu, &threadpool_thread, thread, &lwp,
1020 mutex_spin_enter(&pool->tp_lock);
1023 threadpool_rele(pool);
1026 &pool->tp_lock);
1030 * New kthread now owns the reference to the pool
1034 TAILQ_INSERT_TAIL(&pool->tp_idle_threads, thread,
1043 struct threadpool_job *const job = TAILQ_FIRST(&pool->tp_jobs);
1050 mutex_spin_exit(&pool->tp_lock);
1055 mutex_spin_enter(&pool->tp_lock);
1056 TAILQ_REMOVE(&pool->tp_jobs, job, job_entry);
1058 TAILQ_EMPTY(&pool->tp_idle_threads))) {
1064 dispatcher__race, pool, job);
1065 TAILQ_INSERT_HEAD(&pool->tp_jobs, job,
1073 TAILQ_FIRST(&pool->tp_idle_threads);
1078 TAILQ_REMOVE(&pool->tp_idle_threads, thread,
1084 mutex_spin_exit(&pool->tp_lock);
1089 mutex_spin_enter(&pool->tp_lock);
1091 threadpool_rele(pool);
1092 mutex_spin_exit(&pool->tp_lock);
1094 SDT_PROBE1(sdt, kernel, threadpool, dispatcher__exit, pool);
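Stitched together, 967-1094 form the dispatcher loop: sleep until a job is queued or the pool is dying; if no worker is idle, create one, dropping tp_lock around kthread_create() (hence the threadpool_hold()/threadpool_rele() pair at 1000 and 1023); then hand the head of tp_jobs to the first idle worker, putting the job back at the head if a worker raced in and claimed it first (the dispatcher__race probe at 1064). A condensed, partly assumed outline:

    for (;;) {
            /* Sleep until there is work, or the pool is dying. */
            while (TAILQ_EMPTY(&pool->tp_jobs)) {
                    if (ISSET(pool->tp_flags, THREADPOOL_DYING))
                            goto exit;              /* assumed label */
                    cv_wait(&dispatcher->tpt_cv, &pool->tp_lock);
            }

            /* No idle worker: create one; tp_lock cannot be held
             * across kthread_create(), so pin the pool instead. */
            if (TAILQ_EMPTY(&pool->tp_idle_threads)) {
                    threadpool_hold(pool);  /* reference for new thread */
                    mutex_spin_exit(&pool->tp_lock);
                    /* ... kthread_create(pool->tp_pri, ...); on failure,
                     * threadpool_rele(pool) and retry ... */
                    mutex_spin_enter(&pool->tp_lock);
                    continue;               /* assumed: re-examine state */
            }

            /* Hand the head of tp_jobs to the first idle worker; on a
             * lost race, reinsert the job at the head and loop. */
    }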
1099 /* Thread pool thread */
1105 struct threadpool *const pool = thread->tpt_pool;
1107 KASSERT((pool->tp_cpu == NULL) || (pool->tp_cpu == curcpu()));
1108 KASSERT((pool->tp_cpu == NULL) || (curlwp->l_pflag & LP_BOUND));
1111 mutex_spin_enter(&pool->tp_lock);
1113 cv_wait(&thread->tpt_cv, &pool->tp_lock);
1115 SDT_PROBE1(sdt, kernel, threadpool, thread__start, pool);
1121 if (ISSET(pool->tp_flags, THREADPOOL_DYING)) {
1123 thread__dying, pool);
1126 if (cv_timedwait(&thread->tpt_cv, &pool->tp_lock,
1131 TAILQ_REMOVE(&pool->tp_idle_threads, thread,
1146 mutex_spin_exit(&pool->tp_lock);
1148 SDT_PROBE2(sdt, kernel, threadpool, thread__job, pool, job);
1163 mutex_spin_enter(&pool->tp_lock);
1166 TAILQ_INSERT_TAIL(&pool->tp_idle_threads, thread, tpt_entry);
1168 threadpool_rele(pool);
1169 mutex_spin_exit(&pool->tp_lock);
1171 SDT_PROBE1(sdt, kernel, threadpool, thread__exit, pool);
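The worker loop at 1105-1171 mirrors the dispatcher: wait, with a timeout (1126), for a job assignment; leave the idle list and exit if the pool is dying or the idle timeout lapses with nothing to do; otherwise run the job without tp_lock held and rejoin the idle list. Condensed, with the assignment field, the timeout argument, and the job invocation all assumed:

    for (;;) {
            /* Wait for an assignment, the idle timeout, or death. */
            while (thread->tpt_job == NULL) {       /* assumed field */
                    if (ISSET(pool->tp_flags, THREADPOOL_DYING))
                            break;
                    if (cv_timedwait(&thread->tpt_cv, &pool->tp_lock,
                            mstohz(threadpool_idle_time_ms)))   /* assumed */
                            break;
            }
            if (thread->tpt_job == NULL) {
                    /* Idle too long, or dying: leave the pool and exit. */
                    TAILQ_REMOVE(&pool->tp_idle_threads, thread, tpt_entry);
                    break;
            }

            mutex_spin_exit(&pool->tp_lock);
            (*thread->tpt_job->job_fn)(thread->tpt_job);    /* assumed */
            mutex_spin_enter(&pool->tp_lock);

            thread->tpt_job = NULL;
            TAILQ_INSERT_TAIL(&pool->tp_idle_threads, thread, tpt_entry);
    }
    threadpool_rele(pool);
    mutex_spin_exit(&pool->tp_lock);
    kthread_exit(0);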