linux_kthread.c revision 1.8
/*	$NetBSD: linux_kthread.c,v 1.8 2021/12/19 12:42:48 riastradh Exp $	*/

/*-
 * Copyright (c) 2021 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_kthread.c,v 1.8 2021/12/19 12:42:48 riastradh Exp $");

#include <sys/types.h>

#include <sys/condvar.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/specificdata.h>

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>

#include <drm/drm_wait_netbsd.h>

struct task_struct {
	kmutex_t	kt_lock;	/* protects the flags below */
	kcondvar_t	kt_cv;		/* signalled when the flags change */
	bool		kt_shouldstop:1; /* kthread_stop() was called */
	bool		kt_shouldpark:1; /* kthread_park() was called */
	bool		kt_parked:1;	/* thread is idle in kthread_parkme() */

	int		(*kt_func)(void *);	/* Linux thread function */
	void		*kt_cookie;		/* argument for kt_func */
	spinlock_t	*kt_interlock;		/* caller's lock for kt_wq */
	drm_waitqueue_t	*kt_wq;			/* waitqueue the thread sleeps on */
	struct lwp	*kt_lwp;		/* underlying NetBSD kthread */
};
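
/*
 * Synchronization overview (derived from the code below):
 *
 * - kt_lock/kt_cv protect and signal the kt_shouldstop, kt_shouldpark,
 *   and kt_parked flags.
 * - kt_interlock/kt_wq belong to the caller of kthread_run; the thread
 *   sleeps on kt_wq under kt_interlock, so stop and park requests also
 *   wake that waitqueue.
 * - Lock order is always: kt_interlock, then kt_lock.
 */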

static specificdata_key_t linux_kthread_key __read_mostly = -1;

int
linux_kthread_init(void)
{
	int error;

	error = lwp_specific_key_create(&linux_kthread_key, NULL);
	if (error)
		goto out;

	/* Success!  */
	error = 0;

out:	if (error)
		linux_kthread_fini();
	return error;
}

void
linux_kthread_fini(void)
{

	if (linux_kthread_key != -1) {
		lwp_specific_key_delete(linux_kthread_key);
		linux_kthread_key = -1;
	}
}

#define	linux_kthread()	_linux_kthread(__func__)
static struct task_struct *
_linux_kthread(const char *caller)
{
	struct task_struct *T;

	T = lwp_getspecific(linux_kthread_key);
	KASSERTMSG(T != NULL, "%s must be called from Linux kthread", caller);

	return T;
}

/*
 * Trampoline for the NetBSD kthread: record the task_struct in
 * lwp-specific data so linux_kthread() can find it, run the Linux
 * thread function, and exit with its return value.
 */
static void
linux_kthread_start(void *cookie)
{
	struct task_struct *T = cookie;
	int ret;

	lwp_setspecific(linux_kthread_key, T);

	ret = (*T->kt_func)(T->kt_cookie);
	kthread_exit(ret);
}

static struct task_struct *
kthread_alloc(int (*func)(void *), void *cookie, spinlock_t *interlock,
    drm_waitqueue_t *wq)
{
	struct task_struct *T;

	T = kmem_zalloc(sizeof(*T), KM_SLEEP);

	mutex_init(&T->kt_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&T->kt_cv, "lnxkthrd");

	T->kt_func = func;
	T->kt_cookie = cookie;
	T->kt_interlock = interlock;
	T->kt_wq = wq;

	return T;
}

static void
kthread_free(struct task_struct *T)
{

	cv_destroy(&T->kt_cv);
	mutex_destroy(&T->kt_lock);
	kmem_free(T, sizeof(*T));
}

/*
 * Create and start a kernel thread running func(cookie).  Unlike the
 * Linux API, this takes the interlock and waitqueue the thread will
 * sleep on, so that kthread_stop and kthread_park can wake it there.
 * Returns the new task_struct on success or an ERR_PTR on failure.
 */
struct task_struct *
kthread_run(int (*func)(void *), void *cookie, const char *name,
    spinlock_t *interlock, drm_waitqueue_t *wq)
{
	struct task_struct *T;
	int error;

	T = kthread_alloc(func, cookie, interlock, wq);
	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_MUSTJOIN, NULL,
	    linux_kthread_start, T, &T->kt_lwp, "%s", name);
	if (error) {
		kthread_free(T);
		return ERR_PTR(-error); /* XXX errno NetBSD->Linux */
	}

	return T;
}
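
/*
 * Example usage (an illustrative sketch, not part of this file:
 * mydrv_thread, mydrv_softc, sc_lock, sc_wq, sc_task, mydrv_work_ready,
 * and mydrv_do_work are hypothetical names, and the sleep is assumed to
 * use the DRM_SPIN_WAIT_UNTIL macro from <drm/drm_wait_netbsd.h>):
 *
 *	static int
 *	mydrv_thread(void *cookie)
 *	{
 *		struct mydrv_softc *sc = cookie;
 *		int error;
 *
 *		spin_lock(&sc->sc_lock);
 *		for (;;) {
 *			if (kthread_should_stop())
 *				break;
 *			if (kthread_should_park()) {
 *				kthread_parkme();
 *				continue;
 *			}
 *			DRM_SPIN_WAIT_UNTIL(error, &sc->sc_wq, &sc->sc_lock,
 *			    mydrv_work_ready(sc) || kthread_should_stop() ||
 *			    kthread_should_park());
 *			if (mydrv_work_ready(sc))
 *				mydrv_do_work(sc);
 *		}
 *		spin_unlock(&sc->sc_lock);
 *		return 0;
 *	}
 *
 *	sc->sc_task = kthread_run(mydrv_thread, sc, "mydrv",
 *	    &sc->sc_lock, &sc->sc_wq);
 *	if (IS_ERR(sc->sc_task))
 *		return PTR_ERR(sc->sc_task);
 */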

/*
 * Ask the thread to stop, wait for it to exit, free its task_struct,
 * and return the value its thread function returned.
 */
int
kthread_stop(struct task_struct *T)
{
	int ret;

	/* Lock order: interlock, then kthread lock.  */
	spin_lock(T->kt_interlock);
	mutex_enter(&T->kt_lock);

	/*
	 * Notify the thread that it's stopping, and wake it if it's
	 * parked or sleeping on its own waitqueue.
	 */
	T->kt_shouldpark = false;
	T->kt_shouldstop = true;
	cv_broadcast(&T->kt_cv);
	DRM_SPIN_WAKEUP_ALL(T->kt_wq, T->kt_interlock);

	/* Release the locks.  */
	mutex_exit(&T->kt_lock);
	spin_unlock(T->kt_interlock);

	/* Wait for the (NetBSD) kthread to exit.  */
	ret = kthread_join(T->kt_lwp);

	/* Free the (Linux) kthread.  */
	kthread_free(T);

	/* Return what the thread returned.  */
	return ret;
}

/*
 * True if kthread_stop has been called for the calling thread.  May be
 * called only from a thread created with kthread_run.
 */
int
kthread_should_stop(void)
{
	struct task_struct *T = linux_kthread();
	bool shouldstop;

	mutex_enter(&T->kt_lock);
	shouldstop = T->kt_shouldstop;
	mutex_exit(&T->kt_lock);

	return shouldstop;
}

void
kthread_park(struct task_struct *T)
{

	/* Lock order: interlock, then kthread lock.  */
	spin_lock(T->kt_interlock);
	mutex_enter(&T->kt_lock);

	/* Caller must not ask to park if they've already asked to stop.  */
	KASSERT(!T->kt_shouldstop);

	/* Ask the thread to park.  */
	T->kt_shouldpark = true;

	/*
	 * Ensure the thread is not sleeping on its condvar.  After
	 * this point, we are done with the interlock, which we must
	 * not hold while we wait on the kthread condvar.
	 */
	DRM_SPIN_WAKEUP_ALL(T->kt_wq, T->kt_interlock);
	spin_unlock(T->kt_interlock);

	/*
	 * Wait until the thread has issued kthread_parkme, unless we
	 * are already the thread, which Linux allows and interprets to
	 * mean don't wait.
	 */
	if (T->kt_lwp != curlwp) {
		while (!T->kt_parked)
			cv_wait(&T->kt_cv, &T->kt_lock);
	}

	/* Release the kthread lock too.  */
	mutex_exit(&T->kt_lock);
}

/* Allow a parked thread to resume; wakes it from kthread_parkme.  */
void
kthread_unpark(struct task_struct *T)
{

	mutex_enter(&T->kt_lock);
	T->kt_shouldpark = false;
	cv_broadcast(&T->kt_cv);
	mutex_exit(&T->kt_lock);
}

int
__kthread_should_park(struct task_struct *T)
{
	bool shouldpark;

	mutex_enter(&T->kt_lock);
	shouldpark = T->kt_shouldpark;
	mutex_exit(&T->kt_lock);

	return shouldpark;
}

int
kthread_should_park(void)
{
	struct task_struct *T = linux_kthread();

	return __kthread_should_park(T);
}

/*
 * Called by the thread itself, with the interlock held, to park when
 * asked: drop the interlock, sleep on the kthread condvar until
 * unparked, then reacquire the interlock before returning.
 */
void
kthread_parkme(void)
{
	struct task_struct *T = linux_kthread();

	assert_spin_locked(T->kt_interlock);

	spin_unlock(T->kt_interlock);
	mutex_enter(&T->kt_lock);
	while (T->kt_shouldpark) {
		T->kt_parked = true;
		cv_broadcast(&T->kt_cv);
		cv_wait(&T->kt_cv, &T->kt_lock);
		T->kt_parked = false;
	}
	mutex_exit(&T->kt_lock);
	spin_lock(T->kt_interlock);
}
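
/*
 * Example of driving the thread from its creator (an illustrative
 * sketch; sc, sc_task, and mydrv_reconfigure are the hypothetical names
 * from the kthread_run example above).
 *
 * Quiesce the thread around some reconfiguration:
 *
 *	kthread_park(sc->sc_task);
 *	mydrv_reconfigure(sc);
 *	kthread_unpark(sc->sc_task);
 *
 * At detach time, stop the thread and collect its exit code; note that
 * kthread_stop frees the task_struct, so sc->sc_task is stale afterwards:
 *
 *	error = kthread_stop(sc->sc_task);
 */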
    292