/* $NetBSD: drm_lock.c,v 1.8 2020/05/23 23:42:43 ad Exp $ */

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * DRM lock. Each drm master has a heavy-weight lock to provide mutual
 * exclusion for access to the hardware. The lock can be held either by
 * the kernel or by a drm file; the kernel takes it only for unusual
 * purposes, with drm_idlelock_take, mainly for idling the GPU when
 * closing down.
 *
 * The physical memory storing the lock state is shared between
 * userland and kernel: the pointer at dev->master->lock.hw_lock is
 * mapped into both userland and kernel address spaces. This way,
 * userland can try to take the hardware lock without a system call,
 * although if that fails it will use the DRM_LOCK ioctl to block
 * atomically until the lock is available. All this means that the
 * kernel must use the atomic_ops(3) primitives to manage the lock
 * state.
 */
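
/*
 * For illustration, userland's fastpath over the shared lock word
 * looks roughly like this. This is a sketch in terms of this file's
 * own primitives (hw_lock, context, and the error handling are left
 * schematic); it is not the actual libdrm code:
 *
 *	old = hw_lock->lock;
 *	if (!_DRM_LOCK_IS_HELD(old) &&
 *	    (atomic_cas_uint(&hw_lock->lock, old,
 *		    (context | _DRM_LOCK_HELD)) == old))
 *		... locked without entering the kernel ...
 *	else
 *		... slow path: ioctl(fd, DRM_IOCTL_LOCK, &lock_request) ...
 */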

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_lock.c,v 1.8 2020/05/23 23:42:43 ad Exp $");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>

#include <drm/drmP.h>
#include <drm/drm_internal.h>
#include "../dist/drm/drm_legacy.h"

static bool	drm_lock_acquire(struct drm_lock_data *, int);
static void	drm_lock_release(struct drm_lock_data *, int);
static int	drm_lock_block_signals(struct drm_device *, struct drm_lock *,
		    struct drm_file *);
static void	drm_lock_unblock_signals(struct drm_device *,
		    struct drm_lock *, struct drm_file *);

/*
 * Take the lock on behalf of userland.
 */
int
drm_legacy_lock(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_lock *lock_request = data;
	struct drm_master *master = file->master;
	int error;

	/* Sanitize the drm global mutex bollocks until we get rid of it. */
	KASSERT(mutex_is_locked(&drm_global_mutex));
	mutex_unlock(&drm_global_mutex);

	/* Refuse to lock on behalf of the kernel. */
	if (lock_request->context == DRM_KERNEL_CONTEXT) {
		error = -EINVAL;
		goto out0;
	}

	/* Refuse to set the magic bits. */
	if (lock_request->context !=
	    _DRM_LOCKING_CONTEXT(lock_request->context)) {
		error = -EINVAL;
		goto out0;
	}

	/* Count it in the file and device statistics (XXX why here?). */
	file->lock_count++;

	/* Wait until the hardware lock is gone or we can acquire it. */
	spin_lock(&master->lock.spinlock);

	if (master->lock.user_waiters == UINT32_MAX) {
		error = -EBUSY;
		goto out1;
	}

	master->lock.user_waiters++;
	DRM_SPIN_WAIT_UNTIL(error, &master->lock.lock_queue,
	    &master->lock.spinlock,
	    ((master->lock.hw_lock == NULL) ||
		drm_lock_acquire(&master->lock, lock_request->context)));
	KASSERT(0 < master->lock.user_waiters);
	master->lock.user_waiters--;
	if (error)
		goto out1;

	/* If the lock is gone, give up. */
	if (master->lock.hw_lock == NULL) {
#if 0		/* XXX Linux sends SIGTERM, but why? */
		mutex_enter(&proc_lock);
		psignal(curproc, SIGTERM);
		mutex_exit(&proc_lock);
		error = -EINTR;
#else
		error = -ENXIO;
#endif
		goto out1;
	}

	/* Mark the lock as owned by file. */
	master->lock.file_priv = file;
	master->lock.lock_time = jiffies;	/* XXX Unused? */

	/* Block signals while the lock is held. */
	error = drm_lock_block_signals(dev, lock_request, file);
	if (error)
		goto fail2;

	/* Enter the DMA quiescent state if requested and available. */
	/* XXX Drop the spin lock first... */
	if (ISSET(lock_request->flags, _DRM_LOCK_QUIESCENT) &&
	    (dev->driver->dma_quiescent != NULL)) {
		error = (*dev->driver->dma_quiescent)(dev);
		if (error)
			goto fail3;
	}

	/* Success! */
	error = 0;
	goto out1;

fail3:	drm_lock_unblock_signals(dev, lock_request, file);
fail2:	drm_lock_release(&master->lock, lock_request->context);
	master->lock.file_priv = NULL;
out1:	spin_unlock(&master->lock.spinlock);
out0:	mutex_lock(&drm_global_mutex);
	return error;
}

/*
 * Try to relinquish a lock that userland thinks it holds, per
 * userland's request. Fail if it doesn't actually hold the lock.
 */
int
drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_lock *lock_request = data;
	struct drm_master *master = file->master;
	int error;

	/* Sanitize the drm global mutex bollocks until we get rid of it. */
	KASSERT(mutex_is_locked(&drm_global_mutex));
	mutex_unlock(&drm_global_mutex);

	/* Refuse to unlock on behalf of the kernel. */
	if (lock_request->context == DRM_KERNEL_CONTEXT) {
		error = -EINVAL;
		goto out0;
	}

	/* Lock the internal spin lock to make changes. */
	spin_lock(&master->lock.spinlock);

	/* Make sure it's actually locked. */
	if (!_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock)) {
		error = -EINVAL;	/* XXX Right error? */
		goto out1;
	}

	/* Make sure it's locked in the right context. */
	if (_DRM_LOCKING_CONTEXT(master->lock.hw_lock->lock) !=
	    lock_request->context) {
		error = -EACCES;	/* XXX Right error? */
		goto out1;
	}

	/* Make sure it's locked by us. */
	if (master->lock.file_priv != file) {
		error = -EACCES;	/* XXX Right error? */
		goto out1;
	}

	/* Actually release the lock. */
	drm_lock_release(&master->lock, lock_request->context);

	/* Clear the lock's file pointer, just in case. */
	master->lock.file_priv = NULL;

	/* Unblock the signals we blocked in drm_lock. */
	drm_lock_unblock_signals(dev, lock_request, file);

	/* Success! */
	error = 0;

out1:	spin_unlock(&master->lock.spinlock);
out0:	mutex_lock(&drm_global_mutex);
	return error;
}

/*
 * Drop the lock.
 *
 * Return value is an artefact of Linux. Caller must guarantee
 * preconditions; failure is fatal.
 *
 * XXX Should we also unblock signals like drm_unlock does?
 */
int
drm_legacy_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{

	spin_lock(&lock_data->spinlock);
	drm_lock_release(lock_data, context);
	spin_unlock(&lock_data->spinlock);

	return 0;
}

/*
 * Try to acquire the lock. Whether or not we acquire it, guarantee
 * that whoever next releases it relinquishes it to the kernel, not to
 * anyone else.
 */
void
drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
{

	spin_lock(&lock_data->spinlock);
	KASSERT(!lock_data->idle_has_lock);
	KASSERT(lock_data->kernel_waiters < UINT32_MAX);
	lock_data->kernel_waiters++;
	/* Try to acquire the lock. */
	if (drm_lock_acquire(lock_data, DRM_KERNEL_CONTEXT)) {
		lock_data->idle_has_lock = 1;
	} else {
		/*
		 * Recording that there are kernel waiters will prevent
		 * userland from acquiring the lock again when it is
		 * next released.
		 */
	}
	spin_unlock(&lock_data->spinlock);
}

/*
 * Release whatever drm_idlelock_take managed to acquire.
 */
void
drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
{

	spin_lock(&lock_data->spinlock);
	KASSERT(0 < lock_data->kernel_waiters);
	if (--lock_data->kernel_waiters == 0) {
		if (lock_data->idle_has_lock) {
			/* We did acquire it. Release it. */
			drm_lock_release(lock_data, DRM_KERNEL_CONTEXT);
			/*
			 * Clear the flag so that a later
			 * drm_legacy_idlelock_take doesn't trip its
			 * KASSERT(!lock_data->idle_has_lock).
			 */
			lock_data->idle_has_lock = 0;
		}
	}
	spin_unlock(&lock_data->spinlock);
}
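
/*
 * For example, kernel code that needs the GPU quiet would bracket its
 * hardware access like so. This is a sketch against the two functions
 * above; the surrounding driver context is invented for illustration:
 *
 *	struct drm_lock_data *const lock_data = &dev->master->lock;
 *
 *	drm_legacy_idlelock_take(lock_data);
 *	... touch the hardware while userland is held off ...
 *	drm_legacy_idlelock_release(lock_data);
 */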

/*
 * Does this file hold this drm device's hardware lock?
 *
 * Used to decide whether to release the lock when the file is being
 * closed.
 *
 * XXX I don't think this answers correctly in the case that the
 * userland has taken the lock and it is uncontended. But I don't
 * think we can know what the correct answer is in that case.
 */
int
drm_legacy_i_have_hw_lock(struct drm_device *dev, struct drm_file *file)
{
	struct drm_lock_data *const lock_data = &file->master->lock;
	int answer = 0;

	/* If this file has never locked anything, then no. */
	if (file->lock_count == 0)
		return 0;

	spin_lock(&lock_data->spinlock);

	/* If there is no lock, then this file doesn't hold it. */
	if (lock_data->hw_lock == NULL)
		goto out;

	/* If this lock is not held, then this file doesn't hold it. */
	if (!_DRM_LOCK_IS_HELD(lock_data->hw_lock->lock))
		goto out;

	/*
	 * Otherwise, it boils down to whether this file is the owner
	 * or someone else.
	 *
	 * XXX This is not reliable! Userland doesn't update this when
	 * it takes the lock...
	 */
	answer = (file == lock_data->file_priv);

out:	spin_unlock(&lock_data->spinlock);
	return answer;
}

/*
 * Try to acquire the lock. Return true if successful, false if not.
 *
 * This is hairy because it races with userland, and if userland
 * already holds the lock, we must tell it, by marking the lock
 * _DRM_LOCK_CONT (contended), that it must call ioctl(DRM_UNLOCK) to
 * release the lock so that we can wake waiters.
 *
 * XXX What happens if the process is interrupted?
 */
static bool
drm_lock_acquire(struct drm_lock_data *lock_data, int context)
{
	volatile unsigned int *const lock = &lock_data->hw_lock->lock;
	unsigned int old, new;

	KASSERT(spin_is_locked(&lock_data->spinlock));

	do {
		old = *lock;
		if (!_DRM_LOCK_IS_HELD(old)) {
			new = (context | _DRM_LOCK_HELD);
			if ((0 < lock_data->user_waiters) ||
			    (0 < lock_data->kernel_waiters))
				new |= _DRM_LOCK_CONT;
		} else if (_DRM_LOCKING_CONTEXT(old) != context) {
			new = (old | _DRM_LOCK_CONT);
		} else {
			DRM_ERROR("%d already holds heavyweight lock\n",
			    context);
			return false;
		}
	} while (atomic_cas_uint(lock, old, new) != old);

	return !_DRM_LOCK_IS_HELD(old);
}
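
/*
 * As a worked example (illustrative values, not from the source): an
 * uncontended acquire in context 3 with a waiter pending stores
 * (3 | _DRM_LOCK_HELD | _DRM_LOCK_CONT) into the lock word. Seeing
 * _DRM_LOCK_CONT, the holder knows it must release via
 * ioctl(DRM_UNLOCK) rather than by clearing the word in userland, so
 * that the kernel gets a chance to wake the waiters.
 */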

/*
 * Release the lock held in the given context. Wake any waiters,
 * preferring kernel waiters over userland waiters.
 *
 * Lock's spinlock must be held and lock must be held in this context.
 */
static void
drm_lock_release(struct drm_lock_data *lock_data, int context)
{

	(void)context;	/* ignore */
	KASSERT(spin_is_locked(&lock_data->spinlock));
	KASSERT(_DRM_LOCK_IS_HELD(lock_data->hw_lock->lock));
	KASSERT(_DRM_LOCKING_CONTEXT(lock_data->hw_lock->lock) == context);

	lock_data->hw_lock->lock = 0;
	DRM_SPIN_WAKEUP_ONE(&lock_data->lock_queue, &lock_data->spinlock);
}

/*
 * Block signals for a process that holds a drm lock.
 *
 * XXX It's not processes but files that hold drm locks, so blocking
 * signals in a process seems wrong, and it's not clear that blocking
 * signals automatically is remotely sensible anyway.
 */
static int
drm_lock_block_signals(struct drm_device *dev __unused,
    struct drm_lock *lock_request __unused, struct drm_file *file __unused)
{
	return 0;
}

/*
 * Unblock the signals that drm_lock_block_signals blocked.
 */
static void
drm_lock_unblock_signals(struct drm_device *dev __unused,
    struct drm_lock *lock_request __unused, struct drm_file *file __unused)
{
}