/*	$NetBSD: drm_lock.c,v 1.9 2021/12/19 00:28:20 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * DRM lock.  Each drm master has a heavy-weight lock to provide mutual
 * exclusion for access to the hardware.  The lock can be held by the
 * kernel or by a drm file; the kernel takes access only for unusual
 * purposes, with drm_idlelock_take, mainly for idling the GPU when
 * closing down.
 *
 * The physical memory storing the lock state is shared between
 * userland and kernel: the pointer at dev->master->lock->hw_lock is
 * mapped into both userland and kernel address spaces.  This way,
 * userland can try to take the hardware lock without a system call,
 * although if it fails then it will use the DRM_LOCK ioctl to block
 * atomically until the lock is available.  All this means that the
 * kernel must use atomic_ops to manage the lock state.
 */
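
/*
 * For illustration, a simplified userland-side counterpart of this
 * protocol might look like the sketch below.  It is not part of this
 * file: user_take_lock and its fd/context parameters are hypothetical,
 * and a real program would need <errno.h>, <sys/ioctl.h>, and
 * <drm/drm.h>.  Userland first attempts a compare-and-swap on the
 * shared lock word, and falls back to the DRM_LOCK ioctl to sleep only
 * if the lock is already held:
 */
#if 0
/* Hypothetical userland helper, for illustration only.  */
static void
user_take_lock(int fd, struct drm_hw_lock *hw_lock, int context)
{

	/* Fast path: CAS the shared lock word from free to held.  */
	if (__sync_bool_compare_and_swap(&hw_lock->lock, 0,
		context | _DRM_LOCK_HELD))
		return;

	/* Slow path: block in the kernel until the lock is free.  */
	struct drm_lock lock_request = { .context = context, .flags = 0 };
	while (ioctl(fd, DRM_IOCTL_LOCK, &lock_request) == -1 &&
	    errno == EINTR)
		continue;
}
#endif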

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_lock.c,v 1.9 2021/12/19 00:28:20 riastradh Exp $");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>

#include <drm/drmP.h>

#include "../dist/drm/drm_internal.h"
#include "../dist/drm/drm_legacy.h"

static bool	drm_lock_acquire(struct drm_lock_data *, int);
static void	drm_lock_release(struct drm_lock_data *, int);
static int	drm_lock_block_signals(struct drm_device *, struct drm_lock *,
		    struct drm_file *);
static void	drm_lock_unblock_signals(struct drm_device *,
		    struct drm_lock *, struct drm_file *);

/*
 * Take the lock on behalf of userland.
 */
int
drm_legacy_lock(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_lock *lock_request = data;
	struct drm_master *master = file->master;
	int error;

	/* Sanitize the drm global mutex bollocks until we get rid of it.  */
	KASSERT(mutex_is_locked(&drm_global_mutex));
	mutex_unlock(&drm_global_mutex);

	/* Refuse to lock on behalf of the kernel.  */
	if (lock_request->context == DRM_KERNEL_CONTEXT) {
		error = -EINVAL;
		goto out0;
	}

	/* Refuse to set the magic bits.  */
	if (lock_request->context !=
	    _DRM_LOCKING_CONTEXT(lock_request->context)) {
		error = -EINVAL;
		goto out0;
	}

	/* Count it in the file and device statistics (XXX why here?).  */
	file->lock_count++;

	/* Wait until the hardware lock is gone or we can acquire it.  */
	spin_lock(&master->lock.spinlock);

	if (master->lock.user_waiters == UINT32_MAX) {
		error = -EBUSY;
		goto out1;
	}

	master->lock.user_waiters++;
	DRM_SPIN_WAIT_UNTIL(error, &master->lock.lock_queue,
	    &master->lock.spinlock,
	    ((master->lock.hw_lock == NULL) ||
		drm_lock_acquire(&master->lock, lock_request->context)));
	KASSERT(0 < master->lock.user_waiters);
	master->lock.user_waiters--;
	if (error)
		goto out1;

	/* If the lock is gone, give up.  */
	if (master->lock.hw_lock == NULL) {
#if 0				/* XXX Linux sends SIGTERM, but why?  */
		mutex_enter(&proc_lock);
		psignal(curproc, SIGTERM);
		mutex_exit(&proc_lock);
		error = -EINTR;
#else
		error = -ENXIO;
#endif
		goto out1;
	}

	/* Mark the lock as owned by file.  */
	master->lock.file_priv = file;
	master->lock.lock_time = jiffies;	/* XXX Unused?  */

	/* Block signals while the lock is held.  */
	error = drm_lock_block_signals(dev, lock_request, file);
	if (error)
		goto fail2;

	/* Enter the DMA quiescent state if requested and available.  */
	/* XXX Drop the spin lock first...  */
	if (ISSET(lock_request->flags, _DRM_LOCK_QUIESCENT) &&
	    (dev->driver->dma_quiescent != NULL)) {
		error = (*dev->driver->dma_quiescent)(dev);
		if (error)
			goto fail3;
	}

	/* Success!  */
	error = 0;
	goto out1;

fail3:	drm_lock_unblock_signals(dev, lock_request, file);
fail2:	drm_lock_release(&master->lock, lock_request->context);
	master->lock.file_priv = NULL;
out1:	spin_unlock(&master->lock.spinlock);
out0:	mutex_lock(&drm_global_mutex);
	return error;
}

/*
 * Try to relinquish a lock that userland thinks it holds, per
 * userland's request.  Fail if it doesn't actually hold the lock.
 */
int
drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_lock *lock_request = data;
	struct drm_master *master = file->master;
	int error;

	/* Sanitize the drm global mutex bollocks until we get rid of it.  */
	KASSERT(mutex_is_locked(&drm_global_mutex));
	mutex_unlock(&drm_global_mutex);

	/* Refuse to unlock on behalf of the kernel.  */
	if (lock_request->context == DRM_KERNEL_CONTEXT) {
		error = -EINVAL;
		goto out0;
	}

	/* Lock the internal spin lock to make changes.  */
	spin_lock(&master->lock.spinlock);

	/* If the lock is gone, there is nothing to unlock.  */
	if (master->lock.hw_lock == NULL) {
		error = -ENXIO;
		goto out1;
	}

	/* Make sure it's actually locked.  */
	if (!_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock)) {
		error = -EINVAL;	/* XXX Right error?  */
		goto out1;
	}

	/* Make sure it's locked in the right context.  */
	if (_DRM_LOCKING_CONTEXT(master->lock.hw_lock->lock) !=
	    lock_request->context) {
		error = -EACCES;	/* XXX Right error?  */
		goto out1;
	}

	/* Make sure it's locked by us.  */
	if (master->lock.file_priv != file) {
		error = -EACCES;	/* XXX Right error?  */
		goto out1;
	}

	/* Actually release the lock.  */
	drm_lock_release(&master->lock, lock_request->context);

	/* Clear the lock's file pointer, just in case.  */
	master->lock.file_priv = NULL;

	/* Unblock the signals we blocked in drm_lock.  */
	drm_lock_unblock_signals(dev, lock_request, file);

	/* Success!  */
	error = 0;

out1:	spin_unlock(&master->lock.spinlock);
out0:	mutex_lock(&drm_global_mutex);
	return error;
}

/*
 * Drop the lock.
 *
 * Return value is an artefact of Linux.  Caller must guarantee
 * preconditions; failure is fatal.
 *
 * XXX Should we also unblock signals like drm_unlock does?
 */
int
drm_legacy_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{

	spin_lock(&lock_data->spinlock);
	drm_lock_release(lock_data, context);
	spin_unlock(&lock_data->spinlock);

	return 0;
}

/*
 * Try to acquire the lock.  Whether or not we acquire it, guarantee
 * that whoever next releases it relinquishes it to the kernel, not to
 * anyone else.
 */
void
drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
{

	spin_lock(&lock_data->spinlock);
	KASSERT(!lock_data->idle_has_lock);
	KASSERT(lock_data->kernel_waiters < UINT32_MAX);
	lock_data->kernel_waiters++;
	/* Try to acquire the lock.  */
	if (drm_lock_acquire(lock_data, DRM_KERNEL_CONTEXT)) {
		lock_data->idle_has_lock = 1;
	} else {
		/*
		 * Recording that there are kernel waiters will prevent
		 * userland from acquiring the lock again when it is
		 * next released.
		 */
	}
	spin_unlock(&lock_data->spinlock);
}

/*
 * Release whatever drm_idlelock_take managed to acquire.
 */
void
drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
{

	spin_lock(&lock_data->spinlock);
	KASSERT(0 < lock_data->kernel_waiters);
	if (--lock_data->kernel_waiters == 0) {
		if (lock_data->idle_has_lock) {
			/* We did acquire it.  Release it.  */
			drm_lock_release(lock_data, DRM_KERNEL_CONTEXT);
			lock_data->idle_has_lock = 0;
		}
	}
	spin_unlock(&lock_data->spinlock);
}
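
/*
 * For illustration, a driver shutdown path might use the idle lock
 * like the sketch below.  It is hypothetical and not part of this
 * file: the helper name and the use of dma_quiescent as the idling
 * step are assumptions for the example.
 */
#if 0
/* Hypothetical driver helper, for illustration only.  */
static void
example_quiesce_gpu(struct drm_device *dev)
{
	struct drm_lock_data *const lock_data = &dev->master->lock;

	/*
	 * Whether or not we get the lock now, this guarantees that
	 * userland cannot take it until we release the idle lock.
	 */
	drm_legacy_idlelock_take(lock_data);
	if (dev->driver->dma_quiescent != NULL)
		(void)(*dev->driver->dma_quiescent)(dev);
	drm_legacy_idlelock_release(lock_data);
}
#endif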

/*
 * Does this file hold this drm device's hardware lock?
 *
 * Used to decide whether to release the lock when the file is being
 * closed.
 *
 * XXX I don't think this answers correctly in the case that the
 * userland has taken the lock and it is uncontended.  But I don't
 * think we can know what the correct answer is in that case.
 */
int
drm_legacy_i_have_hw_lock(struct drm_device *dev, struct drm_file *file)
{
	struct drm_lock_data *const lock_data = &file->master->lock;
	int answer = 0;

	/* If this file has never locked anything, then no.  */
	if (file->lock_count == 0)
		return 0;

	spin_lock(&lock_data->spinlock);

	/* If there is no lock, then this file doesn't hold it.  */
	if (lock_data->hw_lock == NULL)
		goto out;

	/* If this lock is not held, then this file doesn't hold it.  */
	if (!_DRM_LOCK_IS_HELD(lock_data->hw_lock->lock))
		goto out;

	/*
	 * Otherwise, it boils down to whether this file is the owner
	 * or someone else.
	 *
	 * XXX This is not reliable!  Userland doesn't update this when
	 * it takes the lock...
	 */
	answer = (file == lock_data->file_priv);

out:	spin_unlock(&lock_data->spinlock);
	return answer;
}

/*
 * Try to acquire the lock.  Return true if successful, false if not.
 *
 * This is hairy because it races with userland, and if userland
 * already holds the lock, we must tell it, by marking it
 * _DRM_LOCK_CONT (contended), that it must call ioctl(DRM_UNLOCK) to
 * release the lock so that we can wake waiters.
 *
 * XXX What happens if the process is interrupted?
 */
static bool
drm_lock_acquire(struct drm_lock_data *lock_data, int context)
{
	volatile unsigned int *const lock = &lock_data->hw_lock->lock;
	unsigned int old, new;

	KASSERT(spin_is_locked(&lock_data->spinlock));

	do {
		old = *lock;
		if (!_DRM_LOCK_IS_HELD(old)) {
			new = (context | _DRM_LOCK_HELD);
			if ((0 < lock_data->user_waiters) ||
			    (0 < lock_data->kernel_waiters))
				new |= _DRM_LOCK_CONT;
		} else if (_DRM_LOCKING_CONTEXT(old) != context) {
			new = (old | _DRM_LOCK_CONT);
		} else {
			DRM_ERROR("%d already holds heavyweight lock\n",
			    context);
			return false;
		}
	} while (atomic_cas_uint(lock, old, new) != old);

	return !_DRM_LOCK_IS_HELD(old);
}
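
/*
 * For reference, the lock word manipulated above packs the holder's
 * context number together with two flag bits (layout per drm.h):
 *
 *	_DRM_LOCK_HELD	0x80000000	lock is held
 *	_DRM_LOCK_CONT	0x40000000	lock is contended
 *	low-order bits			holder's context number
 *
 * For example, an uncontended acquisition by context 3 stores
 * (3 | _DRM_LOCK_HELD) = 0x80000003; if another context then fails to
 * acquire it, the CAS loop above sets _DRM_LOCK_CONT, leaving
 * 0xc0000003.
 */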

/*
 * Release the lock held in the given context.  Wake any waiters,
 * preferring kernel waiters over userland waiters.
 *
 * Lock's spinlock must be held and lock must be held in this context.
 */
static void
drm_lock_release(struct drm_lock_data *lock_data, int context)
{

	(void)context;		/* ignore */
	KASSERT(spin_is_locked(&lock_data->spinlock));
	KASSERT(_DRM_LOCK_IS_HELD(lock_data->hw_lock->lock));
	KASSERT(_DRM_LOCKING_CONTEXT(lock_data->hw_lock->lock) == context);

	lock_data->hw_lock->lock = 0;
	DRM_SPIN_WAKEUP_ONE(&lock_data->lock_queue, &lock_data->spinlock);
}

/*
 * Block signals for a process that holds a drm lock.
 *
 * XXX It's not processes but files that hold drm locks, so blocking
 * signals in a process seems wrong, and it's not clear that blocking
 * signals automatically is remotely sensible anyway.
 */
static int
drm_lock_block_signals(struct drm_device *dev __unused,
    struct drm_lock *lock_request __unused, struct drm_file *file __unused)
{

	return 0;
}

/*
 * Unblock the signals that drm_lock_block_signals blocked.
 */
static void
drm_lock_unblock_signals(struct drm_device *dev __unused,
    struct drm_lock *lock_request __unused, struct drm_file *file __unused)
{
}