/* linux_kthread.c, revision 1.6 -- Linux kthread API emulation for NetBSD */
      1 /*	$NetBSD: linux_kthread.c,v 1.6 2021/12/19 12:42:25 riastradh Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2021 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Taylor R. Campbell.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 #include <sys/cdefs.h>
     33 __KERNEL_RCSID(0, "$NetBSD: linux_kthread.c,v 1.6 2021/12/19 12:42:25 riastradh Exp $");
     34 
     35 #include <sys/types.h>
     36 
     37 #include <sys/condvar.h>
     38 #include <sys/kmem.h>
     39 #include <sys/kthread.h>
     40 #include <sys/lwp.h>
     41 #include <sys/mutex.h>
     42 #include <sys/specificdata.h>
     43 
     44 #include <linux/kthread.h>
     45 #include <linux/spinlock.h>
     46 
     47 #include <drm/drm_wait_netbsd.h>
     48 
/*
 * Linux-compatible kthread state.  kt_lock/kt_cv serialize the
 * stop/park handshake between the thread and its controller; the
 * caller-supplied interlock/waitqueue pair is what the thread itself
 * sleeps on, so stop/park requests can wake it there too.
 */
struct task_struct {
	kmutex_t	kt_lock;	/* protects the flags below */
	kcondvar_t	kt_cv;		/* signalled on state changes */
	bool		kt_shouldstop:1; /* kthread_stop() was called */
	bool		kt_shouldpark:1; /* kthread_park() was called */
	bool		kt_parked:1;	/* thread is idling in kthread_parkme() */

	int		(*kt_func)(void *);	/* thread entry point */
	void		*kt_cookie;		/* argument for kt_func */
	spinlock_t	*kt_interlock;		/* caller's wait interlock */
	drm_waitqueue_t	*kt_wq;			/* caller's waitqueue */
	struct lwp	*kt_lwp;		/* backing NetBSD LWP */
};
     62 
/* Per-LWP key mapping a thread to its struct task_struct; -1 when unset. */
static specificdata_key_t linux_kthread_key __read_mostly = -1;
     64 
     65 int
     66 linux_kthread_init(void)
     67 {
     68 	int error;
     69 
     70 	error = lwp_specific_key_create(&linux_kthread_key, NULL);
     71 	if (error)
     72 		goto out;
     73 
     74 	/* Success!  */
     75 	error = 0;
     76 
     77 out:	if (error)
     78 		linux_kthread_fini();
     79 	return error;
     80 }
     81 
     82 void
     83 linux_kthread_fini(void)
     84 {
     85 
     86 	if (linux_kthread_key != -1) {
     87 		lwp_specific_key_delete(linux_kthread_key);
     88 		linux_kthread_key = -1;
     89 	}
     90 }
     91 
     92 #define	linux_kthread()	_linux_kthread(__func__)
     93 static struct task_struct *
     94 _linux_kthread(const char *caller)
     95 {
     96 	struct task_struct *T;
     97 
     98 	T = lwp_getspecific(linux_kthread_key);
     99 	KASSERTMSG(T != NULL, "%s must be called from Linux kthread", caller);
    100 
    101 	return T;
    102 }
    103 
    104 static void
    105 linux_kthread_start(void *cookie)
    106 {
    107 	struct task_struct *T = cookie;
    108 	int ret;
    109 
    110 	lwp_setspecific(linux_kthread_key, T);
    111 
    112 	ret = (*T->kt_func)(T->kt_cookie);
    113 	kthread_exit(ret);
    114 }
    115 
    116 static struct task_struct *
    117 kthread_alloc(int (*func)(void *), void *cookie, spinlock_t *interlock,
    118     drm_waitqueue_t *wq)
    119 {
    120 	struct task_struct *T;
    121 
    122 	T = kmem_zalloc(sizeof(*T), KM_SLEEP);
    123 
    124 	mutex_init(&T->kt_lock, MUTEX_DEFAULT, IPL_VM);
    125 	cv_init(&T->kt_cv, "lnxkthrd");
    126 
    127 	T->kt_func = func;
    128 	T->kt_cookie = cookie;
    129 	T->kt_interlock = interlock;
    130 	T->kt_wq = wq;
    131 
    132 	return T;
    133 }
    134 
    135 static void
    136 kthread_free(struct task_struct *T)
    137 {
    138 
    139 	cv_destroy(&T->kt_cv);
    140 	mutex_destroy(&T->kt_lock);
    141 	kmem_free(T, sizeof(*T));
    142 }
    143 
    144 struct task_struct *
    145 kthread_run(int (*func)(void *), void *cookie, const char *name,
    146     spinlock_t *interlock, drm_waitqueue_t *wq)
    147 {
    148 	struct task_struct *T;
    149 	int error;
    150 
    151 	T = kthread_alloc(func, cookie, interlock, wq);
    152 	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_MUSTJOIN, NULL,
    153 	    linux_kthread_start, T, &T->kt_lwp, "%s", name);
    154 	if (error) {
    155 		kthread_free(T);
    156 		T = NULL;
    157 	}
    158 
    159 	return T;
    160 }
    161 
/*
 * kthread_stop: Ask the thread to stop, wait for it to exit, free it,
 * and return the value its thread function returned.  Clears any
 * pending park request first so a parked thread wakes and sees the
 * stop.  Must not be called from the thread itself.
 */
int
kthread_stop(struct task_struct *T)
{
	int ret;

	/* Lock order: interlock, then kthread lock.  */
	spin_lock(T->kt_interlock);
	mutex_enter(&T->kt_lock);

	/*
	 * Notify the thread that it's stopping, and wake it if it's
	 * parked or sleeping on its own waitqueue.
	 */
	T->kt_shouldpark = false;
	T->kt_shouldstop = true;
	cv_broadcast(&T->kt_cv);
	DRM_SPIN_WAKEUP_ALL(T->kt_wq, T->kt_interlock);

	/* Release the locks.  */
	mutex_exit(&T->kt_lock);
	spin_unlock(T->kt_interlock);

	/* Wait for the (NetBSD) kthread to exit.  */
	ret = kthread_join(T->kt_lwp);

	/* Free the (Linux) kthread.  */
	kthread_free(T);

	/* Return what the thread returned.  */
	return ret;
}
    193 
    194 int
    195 kthread_should_stop(void)
    196 {
    197 	struct task_struct *T = linux_kthread();
    198 	bool shouldstop;
    199 
    200 	mutex_enter(&T->kt_lock);
    201 	shouldstop = T->kt_shouldstop;
    202 	mutex_exit(&T->kt_lock);
    203 
    204 	return shouldstop;
    205 }
    206 
/*
 * kthread_park: Ask the thread to park and wait until it has done so
 * in kthread_parkme.  If called by the thread itself, the request is
 * recorded but we do not wait (matching Linux semantics).  Must not
 * be mixed with a pending stop request.
 */
void
kthread_park(struct task_struct *T)
{

	/* Lock order: interlock, then kthread lock.  */
	spin_lock(T->kt_interlock);
	mutex_enter(&T->kt_lock);

	/* Caller must not ask to park if they've already asked to stop.  */
	KASSERT(!T->kt_shouldstop);

	/* Ask the thread to park.  */
	T->kt_shouldpark = true;

	/*
	 * Ensure the thread is not sleeping on its condvar.  After
	 * this point, we are done with the interlock, which we must
	 * not hold while we wait on the kthread condvar.
	 */
	DRM_SPIN_WAKEUP_ALL(T->kt_wq, T->kt_interlock);
	spin_unlock(T->kt_interlock);

	/*
	 * Wait until the thread has issued kthread_parkme, unless we
	 * are already the thread, which Linux allows and interprets to
	 * mean don't wait.
	 */
	if (T->kt_lwp != curlwp) {
		while (!T->kt_parked)
			cv_wait(&T->kt_cv, &T->kt_lock);
	}

	/* Release the kthread lock too.  */
	mutex_exit(&T->kt_lock);
}
    242 
    243 void
    244 kthread_unpark(struct task_struct *T)
    245 {
    246 
    247 	mutex_enter(&T->kt_lock);
    248 	T->kt_shouldpark = false;
    249 	cv_broadcast(&T->kt_cv);
    250 	mutex_exit(&T->kt_lock);
    251 }
    252 
    253 int
    254 __kthread_should_park(struct task_struct *T)
    255 {
    256 	bool shouldpark;
    257 
    258 	mutex_enter(&T->kt_lock);
    259 	shouldpark = T->kt_shouldpark;
    260 	mutex_exit(&T->kt_lock);
    261 
    262 	return shouldpark;
    263 }
    264 
/*
 * kthread_should_park: True if a park request is pending for the
 * current thread.  May only be called from a Linux kthread.
 */
int
kthread_should_park(void)
{

	return __kthread_should_park(linux_kthread());
}
    272 
/*
 * kthread_parkme: Called by the thread itself to honour a park
 * request.  Marks itself parked, wakes any kthread_park waiter, and
 * sleeps until the request is withdrawn (kthread_unpark) or converted
 * into a stop (kthread_stop clears kt_shouldpark).  Returns with the
 * thread unparked.
 */
void
kthread_parkme(void)
{
	struct task_struct *T = linux_kthread();

	mutex_enter(&T->kt_lock);
	while (T->kt_shouldpark) {
		/*
		 * Set kt_parked and broadcast before sleeping so the
		 * waiter in kthread_park observes the parked state.
		 */
		T->kt_parked = true;
		cv_broadcast(&T->kt_cv);
		cv_wait(&T->kt_cv, &T->kt_lock);
		T->kt_parked = false;
	}
	mutex_exit(&T->kt_lock);
}
    287