/*	$NetBSD: drm_lock.c,v 1.12 2021/12/19 09:52:00 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * DRM lock. Each drm master has a heavy-weight lock to provide mutual
 * exclusion for access to the hardware. The lock can be held by the
 * kernel or by a drm file; the kernel takes access only for unusual
 * purposes, with drm_idlelock_take, mainly for idling the GPU when
 * closing down.
 *
 * The physical memory storing the lock state is shared between
 * userland and kernel: the pointer at dev->master->lock->hw_lock is
 * mapped into both userland and kernel address spaces. This way,
 * userland can try to take the hardware lock without a system call,
 * although if it fails then it will use the DRM_LOCK ioctl to block
 * atomically until the lock is available. All this means that the
 * kernel must use atomic_ops to manage the lock state.
 */
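
/*
 * Illustrative sketch only (not code from this file or from libdrm):
 * the userland fast path described above amounts to a compare-and-swap
 * on the shared lock word, falling back to the DRM_LOCK ioctl on
 * contention.  Variable names here are hypothetical; libdrm's actual
 * implementation differs in detail.
 *
 *	volatile unsigned int *lock = &hw_lock->lock; // shared mapping
 *	unsigned int old = *lock;
 *	if (!_DRM_LOCK_IS_HELD(old) &&
 *	    atomic_cas_uint(lock, old, context | _DRM_LOCK_HELD) == old)
 *		return 0;		// fast path: no system call
 *	return ioctl(fd, DRM_IOCTL_LOCK, &lock_request); // slow path
 */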

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_lock.c,v 1.12 2021/12/19 09:52:00 riastradh Exp $");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/file.h>
#include <sys/systm.h>

#include <drm/drm_print.h>
#include "../dist/drm/drm_internal.h"
#include "../dist/drm/drm_legacy.h"

static bool drm_lock_acquire(struct drm_lock_data *, int);
static void drm_lock_release(struct drm_lock_data *, int);

#if IS_ENABLED(CONFIG_DRM_LEGACY)
static int drm_lock_block_signals(struct drm_device *, struct drm_lock *,
    struct drm_file *);
static void drm_lock_unblock_signals(struct drm_device *,
    struct drm_lock *, struct drm_file *);

/*
 * Take the lock on behalf of userland.
 */
int
drm_legacy_lock(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_lock *lock_request = data;
	struct drm_master *master = file->master;
	int error;

	/* Sanitize the drm global mutex bollocks until we get rid of it. */
	KASSERT(mutex_is_locked(&drm_global_mutex));
	mutex_unlock(&drm_global_mutex);

	/* Refuse to lock on behalf of the kernel. */
	if (lock_request->context == DRM_KERNEL_CONTEXT) {
		error = -EINVAL;
		goto out0;
	}

	/* Refuse to set the magic bits. */
	if (lock_request->context !=
	    _DRM_LOCKING_CONTEXT(lock_request->context)) {
		error = -EINVAL;
		goto out0;
	}

	/* Count it in the file and device statistics (XXX why here?). */
	file->lock_count++;

	/* Wait until the hardware lock is gone or we can acquire it. */
	spin_lock(&master->lock.spinlock);

	if (master->lock.user_waiters == UINT32_MAX) {
		error = -EBUSY;
		goto out1;
	}

	master->lock.user_waiters++;
	DRM_SPIN_WAIT_UNTIL(error, &master->lock.lock_queue,
	    &master->lock.spinlock,
	    ((master->lock.hw_lock == NULL) ||
		drm_lock_acquire(&master->lock, lock_request->context)));
	KASSERT(0 < master->lock.user_waiters);
	master->lock.user_waiters--;
	if (error)
		goto out1;

	/* If the lock is gone, give up. */
	if (master->lock.hw_lock == NULL) {
#if 0				/* XXX Linux sends SIGTERM, but why? */
		mutex_enter(&proc_lock);
		psignal(curproc, SIGTERM);
		mutex_exit(&proc_lock);
		error = -EINTR;
#else
		error = -ENXIO;
#endif
		goto out1;
	}

	/* Mark the lock as owned by file. */
	master->lock.file_priv = file;
	master->lock.lock_time = jiffies;	/* XXX Unused? */

	/* Block signals while the lock is held. */
	error = drm_lock_block_signals(dev, lock_request, file);
	if (error)
		goto fail2;

	/* Enter the DMA quiescent state if requested and available. */
	/* XXX Drop the spin lock first... */
	if (ISSET(lock_request->flags, _DRM_LOCK_QUIESCENT) &&
	    (dev->driver->dma_quiescent != NULL)) {
		error = (*dev->driver->dma_quiescent)(dev);
		if (error)
			goto fail3;
	}

	/* Success! */
	error = 0;
	goto out1;

fail3:	drm_lock_unblock_signals(dev, lock_request, file);
fail2:	drm_lock_release(&master->lock, lock_request->context);
	master->lock.file_priv = NULL;
out1:	spin_unlock(&master->lock.spinlock);
out0:	mutex_lock(&drm_global_mutex);
	return error;
}

/*
 * Try to relinquish a lock that userland thinks it holds, per
 * userland's request. Fail if it doesn't actually hold the lock.
 */
int
drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_lock *lock_request = data;
	struct drm_master *master = file->master;
	int error;

	/* Sanitize the drm global mutex bollocks until we get rid of it. */
	KASSERT(mutex_is_locked(&drm_global_mutex));
	mutex_unlock(&drm_global_mutex);

	/* Refuse to unlock on behalf of the kernel. */
	if (lock_request->context == DRM_KERNEL_CONTEXT) {
		error = -EINVAL;
		goto out0;
	}

	/* Lock the internal spin lock to make changes. */
	spin_lock(&master->lock.spinlock);

	/* Make sure it's actually locked. */
	if (!_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock)) {
		error = -EINVAL;	/* XXX Right error? */
		goto out1;
	}

	/* Make sure it's locked in the right context. */
	if (_DRM_LOCKING_CONTEXT(master->lock.hw_lock->lock) !=
	    lock_request->context) {
		error = -EACCES;	/* XXX Right error? */
		goto out1;
	}

	/* Make sure it's locked by us. */
	if (master->lock.file_priv != file) {
		error = -EACCES;	/* XXX Right error? */
		goto out1;
	}

	/* Actually release the lock. */
	drm_lock_release(&master->lock, lock_request->context);

	/* Clear the lock's file pointer, just in case. */
	master->lock.file_priv = NULL;

	/* Unblock the signals we blocked in drm_lock. */
	drm_lock_unblock_signals(dev, lock_request, file);

	/* Success! */
	error = 0;

out1:	spin_unlock(&master->lock.spinlock);
out0:	mutex_lock(&drm_global_mutex);
	return error;
}
#endif

/*
 * Try to acquire the lock. Whether or not we acquire it, guarantee
 * that whoever next releases it relinquishes it to the kernel, not to
 * anyone else.
 */
void
drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
{

	spin_lock(&lock_data->spinlock);
	KASSERT(!lock_data->idle_has_lock);
	KASSERT(lock_data->kernel_waiters < UINT32_MAX);
	lock_data->kernel_waiters++;
	/* Try to acquire the lock. */
	if (drm_lock_acquire(lock_data, DRM_KERNEL_CONTEXT)) {
		lock_data->idle_has_lock = 1;
	} else {
		/*
		 * Recording that there are kernel waiters will prevent
		 * userland from acquiring the lock again when it is
		 * next released.
		 */
	}
	spin_unlock(&lock_data->spinlock);
}

/*
 * Release whatever drm_idlelock_take managed to acquire.
 */
void
drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
{

	spin_lock(&lock_data->spinlock);
	KASSERT(0 < lock_data->kernel_waiters);
	if (--lock_data->kernel_waiters == 0) {
		if (lock_data->idle_has_lock) {
			/* We did acquire it. Release it. */
			drm_lock_release(lock_data, DRM_KERNEL_CONTEXT);
		}
	}
	spin_unlock(&lock_data->spinlock);
}
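
/*
 * Illustrative usage (a sketch, not taken from any driver): the
 * idlelock calls are meant to be used in balanced pairs around kernel
 * work that must not race with userland's hardware access, e.g. when
 * idling the GPU during teardown:
 *
 *	drm_legacy_idlelock_take(&dev->master->lock);
 *	... quiesce the hardware ...
 *	drm_legacy_idlelock_release(&dev->master->lock);
 *
 * If the take acquired the hardware lock, the matching release drops
 * it; either way the kernel_waiters count is balanced.
 */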

#if IS_ENABLED(CONFIG_DRM_LEGACY)
/*
 * Release the lock and free it on closing of a drm file.
 */
void
drm_legacy_lock_release(struct drm_device *dev, struct file *fp)
{
	struct drm_file *const file = fp->f_data;
	struct drm_lock_data *const lock_data = &file->master->lock;

	/* If this file has never locked anything, nothing to do. */
	if (file->lock_count == 0)
		return;

	spin_lock(&lock_data->spinlock);

	/* If there is no lock, nothing to do. */
	if (lock_data->hw_lock == NULL)
		goto out;

	/* If this lock is not held, nothing to do. */
	if (!_DRM_LOCK_IS_HELD(lock_data->hw_lock->lock))
		goto out;

	/*
	 * Otherwise, it boils down to whether this file is the owner
	 * or someone else.
	 *
	 * XXX This is not reliable! Userland doesn't update this when
	 * it takes the lock...
	 */
	if (file == lock_data->file_priv)
		drm_lock_release(lock_data,
		    _DRM_LOCKING_CONTEXT(file->master->lock.hw_lock->lock));

out:	spin_unlock(&lock_data->spinlock);
}
#endif

/*
 * Try to acquire the lock. Return true if successful, false if not.
 *
 * This is hairy because it races with userland, and if userland
 * already holds the lock, we must tell it, by marking it
 * _DRM_LOCK_CONT (contended), that it must call ioctl(DRM_UNLOCK) to
 * release the lock so that we can wake waiters.
 *
 * XXX What happens if the process is interrupted?
 */
static bool
drm_lock_acquire(struct drm_lock_data *lock_data, int context)
{
	volatile unsigned int *const lock = &lock_data->hw_lock->lock;
	unsigned int old, new;

	KASSERT(spin_is_locked(&lock_data->spinlock));

	do {
		old = *lock;
		if (!_DRM_LOCK_IS_HELD(old)) {
			new = (context | _DRM_LOCK_HELD);
			if ((0 < lock_data->user_waiters) ||
			    (0 < lock_data->kernel_waiters))
				new |= _DRM_LOCK_CONT;
		} else if (_DRM_LOCKING_CONTEXT(old) != context) {
			new = (old | _DRM_LOCK_CONT);
		} else {
			DRM_ERROR("%d already holds heavyweight lock\n",
			    context);
			return false;
		}
	} while (atomic_cas_uint(lock, old, new) != old);

	return !_DRM_LOCK_IS_HELD(old);
}
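
/*
 * For reference (flag values from <drm/drm.h>): the lock word packs
 * the owner's context number together with two flag bits,
 * _DRM_LOCK_HELD (0x80000000) and _DRM_LOCK_CONT (0x40000000);
 * _DRM_LOCKING_CONTEXT() masks both flags off.  The CAS loop above
 * therefore makes one of three transitions:
 *
 *	free			-> context | HELD (| CONT if waiters)
 *	held by another context	-> old | CONT	(mark contended, fail)
 *	held by this context	-> fail		(recursive acquisition)
 */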

/*
 * Release the lock held in the given context. Wake any waiters,
 * preferring kernel waiters over userland waiters.
 *
 * Lock's spinlock must be held and lock must be held in this context.
 */
static void
drm_lock_release(struct drm_lock_data *lock_data, int context)
{

	(void)context;		/* ignore */
	KASSERT(spin_is_locked(&lock_data->spinlock));
	KASSERT(_DRM_LOCK_IS_HELD(lock_data->hw_lock->lock));
	KASSERT(_DRM_LOCKING_CONTEXT(lock_data->hw_lock->lock) == context);

	lock_data->hw_lock->lock = 0;
	DRM_SPIN_WAKEUP_ONE(&lock_data->lock_queue, &lock_data->spinlock);
}

#if IS_ENABLED(CONFIG_DRM_LEGACY)
/*
 * Block signals for a process that holds a drm lock.
 *
 * XXX It's not processes but files that hold drm locks, so blocking
 * signals in a process seems wrong, and it's not clear that blocking
 * signals automatically is remotely sensible anyway.
 */
static int
drm_lock_block_signals(struct drm_device *dev __unused,
    struct drm_lock *lock_request __unused, struct drm_file *file __unused)
{
	return 0;
}

/*
 * Unblock the signals that drm_lock_block_signals blocked.
 */
static void
drm_lock_unblock_signals(struct drm_device *dev __unused,
    struct drm_lock *lock_request __unused, struct drm_file *file __unused)
{
}
#endif