/*	$NetBSD: drm_lock.c,v 1.9 2021/12/19 00:28:20 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * DRM lock. Each drm master has a heavy-weight lock to provide mutual
 * exclusion for access to the hardware. The lock can be held by the
 * kernel or by a drm file; the kernel takes access only for unusual
 * purposes, with drm_idlelock_take, mainly for idling the GPU when
 * closing down.
 *
 * The physical memory storing the lock state is shared between
 * userland and kernel: the pointer at dev->master->lock->hw_lock is
 * mapped into both userland and kernel address spaces. This way,
 * userland can try to take the hardware lock without a system call,
 * although if it fails then it will use the DRM_LOCK ioctl to block
 * atomically until the lock is available. All this means that the
 * kernel must use atomic_ops to manage the lock state.
 */
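
/*
 * For reference, the lock word in that shared memory is laid out as in
 * <drm/drm.h>: the low bits hold the locking context (DRM_KERNEL_CONTEXT
 * is context 0), while _DRM_LOCK_HELD (0x80000000) and _DRM_LOCK_CONT
 * (0x40000000) mark it held and contended. A rough sketch of the userland
 * fast path (not the authoritative libdrm code) is: atomically
 * compare-and-swap the shared word from its unheld value to
 * (context | _DRM_LOCK_HELD), and on failure fall back to the DRM_LOCK
 * ioctl, which lands in drm_legacy_lock below.
 */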

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_lock.c,v 1.9 2021/12/19 00:28:20 riastradh Exp $");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>

#include <drm/drmP.h>

#include "../dist/drm/drm_internal.h"
#include "../dist/drm/drm_legacy.h"

static bool drm_lock_acquire(struct drm_lock_data *, int);
static void drm_lock_release(struct drm_lock_data *, int);
static int drm_lock_block_signals(struct drm_device *, struct drm_lock *,
                    struct drm_file *);
static void drm_lock_unblock_signals(struct drm_device *,
                    struct drm_lock *, struct drm_file *);

/*
 * Take the lock on behalf of userland.
 */
int
drm_legacy_lock(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_lock *lock_request = data;
        struct drm_master *master = file->master;
        int error;

        /* Sanitize the drm global mutex bollocks until we get rid of it. */
        KASSERT(mutex_is_locked(&drm_global_mutex));
        mutex_unlock(&drm_global_mutex);

        /* Refuse to lock on behalf of the kernel. */
        if (lock_request->context == DRM_KERNEL_CONTEXT) {
                error = -EINVAL;
                goto out0;
        }

        /* Refuse to set the magic bits. */
        if (lock_request->context !=
            _DRM_LOCKING_CONTEXT(lock_request->context)) {
                error = -EINVAL;
                goto out0;
        }

        /* Count it in the file and device statistics (XXX why here?). */
        file->lock_count++;

        /* Wait until the hardware lock is gone or we can acquire it. */
        spin_lock(&master->lock.spinlock);

        if (master->lock.user_waiters == UINT32_MAX) {
                error = -EBUSY;
                goto out1;
        }

        master->lock.user_waiters++;
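        /*
         * The wait below drops the spin lock while sleeping and retakes
         * it before re-testing the condition, as the DRM wait macros do;
         * if the wait fails (e.g. it is interrupted by a signal), error
         * is set to a negative errno and we back out below.
         */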
        DRM_SPIN_WAIT_UNTIL(error, &master->lock.lock_queue,
            &master->lock.spinlock,
            ((master->lock.hw_lock == NULL) ||
                drm_lock_acquire(&master->lock, lock_request->context)));
        KASSERT(0 < master->lock.user_waiters);
        master->lock.user_waiters--;
        if (error)
                goto out1;

        /* If the lock is gone, give up. */
        if (master->lock.hw_lock == NULL) {
#if 0                           /* XXX Linux sends SIGTERM, but why? */
                mutex_enter(&proc_lock);
                psignal(curproc, SIGTERM);
                mutex_exit(&proc_lock);
                error = -EINTR;
#else
                error = -ENXIO;
#endif
                goto out1;
        }

        /* Mark the lock as owned by file. */
        master->lock.file_priv = file;
        master->lock.lock_time = jiffies; /* XXX Unused? */

        /* Block signals while the lock is held. */
        error = drm_lock_block_signals(dev, lock_request, file);
        if (error)
                goto fail2;

        /* Enter the DMA quiescent state if requested and available. */
        /* XXX Drop the spin lock first... */
        if (ISSET(lock_request->flags, _DRM_LOCK_QUIESCENT) &&
            (dev->driver->dma_quiescent != NULL)) {
                error = (*dev->driver->dma_quiescent)(dev);
                if (error)
                        goto fail3;
        }

        /* Success! */
        error = 0;
        goto out1;

fail3:  drm_lock_unblock_signals(dev, lock_request, file);
fail2:  drm_lock_release(&master->lock, lock_request->context);
        master->lock.file_priv = NULL;
out1:   spin_unlock(&master->lock.spinlock);
out0:   mutex_lock(&drm_global_mutex);
        return error;
}

/*
 * Try to relinquish a lock that userland thinks it holds, per
 * userland's request. Fail if it doesn't actually hold the lock.
 */
int
drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_lock *lock_request = data;
        struct drm_master *master = file->master;
        int error;

        /* Sanitize the drm global mutex bollocks until we get rid of it. */
        KASSERT(mutex_is_locked(&drm_global_mutex));
        mutex_unlock(&drm_global_mutex);

        /* Refuse to unlock on behalf of the kernel. */
        if (lock_request->context == DRM_KERNEL_CONTEXT) {
                error = -EINVAL;
                goto out0;
        }

        /* Lock the internal spin lock to make changes. */
        spin_lock(&master->lock.spinlock);

        /* Make sure it's actually locked. */
        if (!_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock)) {
                error = -EINVAL; /* XXX Right error? */
                goto out1;
        }

        /* Make sure it's locked in the right context. */
        if (_DRM_LOCKING_CONTEXT(master->lock.hw_lock->lock) !=
            lock_request->context) {
                error = -EACCES; /* XXX Right error? */
                goto out1;
        }

        /* Make sure it's locked by us. */
        if (master->lock.file_priv != file) {
                error = -EACCES; /* XXX Right error? */
                goto out1;
        }

        /* Actually release the lock. */
        drm_lock_release(&master->lock, lock_request->context);

        /* Clear the lock's file pointer, just in case. */
        master->lock.file_priv = NULL;

        /* Unblock the signals we blocked in drm_lock. */
        drm_lock_unblock_signals(dev, lock_request, file);

        /* Success! */
        error = 0;

out1:   spin_unlock(&master->lock.spinlock);
out0:   mutex_lock(&drm_global_mutex);
        return error;
}

/*
 * Drop the lock.
 *
 * Return value is an artefact of Linux. Caller must guarantee
 * preconditions; failure is fatal.
 *
 * XXX Should we also unblock signals like drm_unlock does?
 */
int
drm_legacy_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{

        spin_lock(&lock_data->spinlock);
        drm_lock_release(lock_data, context);
        spin_unlock(&lock_data->spinlock);

        return 0;
}

/*
 * Try to acquire the lock. Whether or not we acquire it, guarantee
 * that whoever next releases it relinquishes it to the kernel, not to
 * anyone else.
 */
void
drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
{

        spin_lock(&lock_data->spinlock);
        KASSERT(!lock_data->idle_has_lock);
        KASSERT(lock_data->kernel_waiters < UINT32_MAX);
        lock_data->kernel_waiters++;
        /* Try to acquire the lock. */
        if (drm_lock_acquire(lock_data, DRM_KERNEL_CONTEXT)) {
                lock_data->idle_has_lock = 1;
        } else {
                /*
                 * Recording that there are kernel waiters will prevent
                 * userland from acquiring the lock again when it is
                 * next released.
                 */
        }
        spin_unlock(&lock_data->spinlock);
}

/*
 * Release whatever drm_idlelock_take managed to acquire.
 */
void
drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
{

        spin_lock(&lock_data->spinlock);
        KASSERT(0 < lock_data->kernel_waiters);
        if (--lock_data->kernel_waiters == 0) {
                if (lock_data->idle_has_lock) {
                        /* We did acquire it. Release it. */
                        drm_lock_release(lock_data, DRM_KERNEL_CONTEXT);
                }
        }
        spin_unlock(&lock_data->spinlock);
}

/*
 * Does this file hold this drm device's hardware lock?
 *
 * Used to decide whether to release the lock when the file is being
 * closed.
 *
 * XXX I don't think this answers correctly in the case that the
 * userland has taken the lock and it is uncontended. But I don't
 * think we can know what the correct answer is in that case.
 */
int
drm_legacy_i_have_hw_lock(struct drm_device *dev, struct drm_file *file)
{
        struct drm_lock_data *const lock_data = &file->master->lock;
        int answer = 0;

        /* If this file has never locked anything, then no. */
        if (file->lock_count == 0)
                return 0;

        spin_lock(&lock_data->spinlock);

        /* If there is no lock, then this file doesn't hold it. */
        if (lock_data->hw_lock == NULL)
                goto out;

        /* If this lock is not held, then this file doesn't hold it. */
        if (!_DRM_LOCK_IS_HELD(lock_data->hw_lock->lock))
                goto out;

        /*
         * Otherwise, it boils down to whether this file is the owner
         * or someone else.
         *
         * XXX This is not reliable! Userland doesn't update this when
         * it takes the lock...
         */
        answer = (file == lock_data->file_priv);

out:    spin_unlock(&lock_data->spinlock);
        return answer;
}

/*
 * Try to acquire the lock. Return true if successful, false if not.
 *
 * This is hairy because it races with userland, and if userland
 * already holds the lock, we must tell it, by marking it
 * _DRM_LOCK_CONT (contended), that it must call ioctl(DRM_UNLOCK) to
 * release the lock so that we can wake waiters.
 *
 * XXX What happens if the process is interrupted?
 */
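
/*
 * For example: if the word is free and context 3 acquires it with no
 * waiters recorded, the word becomes (3 | _DRM_LOCK_HELD). If context 4
 * then tries to acquire it, the attempt fails and the word becomes
 * (3 | _DRM_LOCK_HELD | _DRM_LOCK_CONT), telling the holder to unlock
 * through the ioctl so that we can wake context 4.
 */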
static bool
drm_lock_acquire(struct drm_lock_data *lock_data, int context)
{
        volatile unsigned int *const lock = &lock_data->hw_lock->lock;
        unsigned int old, new;

        KASSERT(spin_is_locked(&lock_data->spinlock));

        do {
                old = *lock;
                if (!_DRM_LOCK_IS_HELD(old)) {
                        new = (context | _DRM_LOCK_HELD);
                        if ((0 < lock_data->user_waiters) ||
                            (0 < lock_data->kernel_waiters))
                                new |= _DRM_LOCK_CONT;
                } else if (_DRM_LOCKING_CONTEXT(old) != context) {
                        new = (old | _DRM_LOCK_CONT);
                } else {
                        DRM_ERROR("%d already holds heavyweight lock\n",
                            context);
                        return false;
                }
        } while (atomic_cas_uint(lock, old, new) != old);

        return !_DRM_LOCK_IS_HELD(old);
}

/*
 * Release the lock held in the given context. Wake any waiters,
 * preferring kernel waiters over userland waiters.
 *
 * Lock's spinlock must be held and lock must be held in this context.
 */
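
/*
 * Note that releasing stores zero into the lock word, which clears the
 * context and _DRM_LOCK_CONT along with _DRM_LOCK_HELD, so the next
 * acquirer starts from an unheld, uncontended word.
 */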
static void
drm_lock_release(struct drm_lock_data *lock_data, int context)
{

        (void)context;          /* ignore */
        KASSERT(spin_is_locked(&lock_data->spinlock));
        KASSERT(_DRM_LOCK_IS_HELD(lock_data->hw_lock->lock));
        KASSERT(_DRM_LOCKING_CONTEXT(lock_data->hw_lock->lock) == context);

        lock_data->hw_lock->lock = 0;
        DRM_SPIN_WAKEUP_ONE(&lock_data->lock_queue, &lock_data->spinlock);
}

/*
 * Block signals for a process that holds a drm lock.
 *
 * XXX It's not processes but files that hold drm locks, so blocking
 * signals in a process seems wrong, and it's not clear that blocking
 * signals automatically is remotely sensible anyway.
 */
static int
drm_lock_block_signals(struct drm_device *dev __unused,
    struct drm_lock *lock_request __unused, struct drm_file *file __unused)
{
        return 0;
}

/*
 * Unblock the signals that drm_lock_block_signals blocked.
 */
static void
drm_lock_unblock_signals(struct drm_device *dev __unused,
    struct drm_lock *lock_request __unused, struct drm_file *file __unused)
{
}