/*	$NetBSD: drm_lock.c,v 1.3.6.1 2016/04/22 15:44:16 skrll Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * DRM lock.  Each drm master has a heavy-weight lock to provide mutual
 * exclusion for access to the hardware.  The lock can be held by the
 * kernel or by a drm file; the kernel takes access only for unusual
 * purposes, with drm_idlelock_take, mainly for idling the GPU when
 * closing down.
 *
 * The physical memory storing the lock state is shared between
 * userland and kernel: the pointer at dev->master->lock->hw_lock is
 * mapped into both userland and kernel address spaces.  This way,
 * userland can try to take the hardware lock without a system call,
 * although if it fails then it will use the DRM_LOCK ioctl to block
 * atomically until the lock is available.  All this means that the
 * kernel must use atomic_ops to manage the lock state.
 */
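
/*
 * Illustrative sketch only (not compiled here, and not the libdrm
 * implementation): roughly how a userland client might use the shared
 * lock word described above, trying a compare-and-swap first and
 * falling back to the DRM_LOCK ioctl when the lock is already held.
 * The function name and the __sync_* builtin are assumptions for the
 * sake of the example; a real client would use libdrm.
 */
#if 0
static int
example_user_lock(int fd, volatile unsigned int *lock, unsigned int context)
{
        unsigned int old, new;

        do {
                old = *lock;
                if (_DRM_LOCK_IS_HELD(old))
                        break;  /* already held: block in the kernel */
                new = (context | _DRM_LOCK_HELD);
        } while (__sync_val_compare_and_swap(lock, old, new) != old);

        if (_DRM_LOCK_IS_HELD(old)) {
                /* Contended: let drm_lock (below) wait on our behalf.  */
                struct drm_lock request = { .context = context, .flags = 0 };

                return ioctl(fd, DRM_IOCTL_LOCK, &request);
        }
        return 0;               /* fast path: no system call needed */
}
#endif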

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_lock.c,v 1.3.6.1 2016/04/22 15:44:16 skrll Exp $");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>

#include <drm/drmP.h>

static bool     drm_lock_acquire(struct drm_lock_data *, int);
static void     drm_lock_release(struct drm_lock_data *, int);
static int      drm_lock_block_signals(struct drm_device *, struct drm_lock *,
                    struct drm_file *);
static void     drm_lock_unblock_signals(struct drm_device *,
                    struct drm_lock *, struct drm_file *);

/*
 * Take the lock on behalf of userland.
 */
int
drm_lock(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_lock *lock_request = data;
        struct drm_master *master = file->master;
        int error;

        /* Sanitize the drm global mutex bollocks until we get rid of it.  */
        KASSERT(mutex_is_locked(&drm_global_mutex));
        mutex_unlock(&drm_global_mutex);

        /* Refuse to lock on behalf of the kernel.  */
        if (lock_request->context == DRM_KERNEL_CONTEXT) {
                error = -EINVAL;
                goto out0;
        }

        /* Refuse to set the magic bits.  */
        if (lock_request->context !=
            _DRM_LOCKING_CONTEXT(lock_request->context)) {
                error = -EINVAL;
                goto out0;
        }

        /* Count it in the file and device statistics (XXX why here?).  */
        file->lock_count++;

        /* Wait until the hardware lock is gone or we can acquire it.  */
        spin_lock(&master->lock.spinlock);

        if (master->lock.user_waiters == UINT32_MAX) {
                error = -EBUSY;
                goto out1;
        }

        master->lock.user_waiters++;
        DRM_SPIN_WAIT_UNTIL(error, &master->lock.lock_queue,
            &master->lock.spinlock,
            ((master->lock.hw_lock == NULL) ||
                drm_lock_acquire(&master->lock, lock_request->context)));
        KASSERT(0 < master->lock.user_waiters);
        master->lock.user_waiters--;
        if (error)
                goto out1;

        /* If the lock is gone, give up.  */
        if (master->lock.hw_lock == NULL) {
#if 0                           /* XXX Linux sends SIGTERM, but why?  */
                mutex_enter(proc_lock);
                psignal(curproc, SIGTERM);
                mutex_exit(proc_lock);
                error = -EINTR;
#else
                error = -ENXIO;
#endif
                goto out1;
        }

        /* Mark the lock as owned by file.  */
        master->lock.file_priv = file;
        master->lock.lock_time = jiffies; /* XXX Unused?  */

        /* Block signals while the lock is held.  */
        error = drm_lock_block_signals(dev, lock_request, file);
        if (error)
                goto fail2;

        /* Enter the DMA quiescent state if requested and available.  */
        /* XXX Drop the spin lock first...  */
        if (ISSET(lock_request->flags, _DRM_LOCK_QUIESCENT) &&
            (dev->driver->dma_quiescent != NULL)) {
                error = (*dev->driver->dma_quiescent)(dev);
                if (error)
                        goto fail3;
        }

        /* Success!  */
        error = 0;
        goto out1;

fail3:  drm_lock_unblock_signals(dev, lock_request, file);
fail2:  drm_lock_release(&master->lock, lock_request->context);
        master->lock.file_priv = NULL;
out1:   spin_unlock(&master->lock.spinlock);
out0:   mutex_lock(&drm_global_mutex);
        return error;
}

/*
 * Try to relinquish a lock that userland thinks it holds, per
 * userland's request.  Fail if it doesn't actually hold the lock.
 */
int
drm_unlock(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_lock *lock_request = data;
        struct drm_master *master = file->master;
        int error;

        /* Sanitize the drm global mutex bollocks until we get rid of it.  */
        KASSERT(mutex_is_locked(&drm_global_mutex));
        mutex_unlock(&drm_global_mutex);

        /* Refuse to unlock on behalf of the kernel.  */
        if (lock_request->context == DRM_KERNEL_CONTEXT) {
                error = -EINVAL;
                goto out0;
        }

        /* Lock the internal spin lock to make changes.  */
        spin_lock(&master->lock.spinlock);

        /* Make sure it's actually locked.  */
        if (!_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock)) {
                error = -EINVAL; /* XXX Right error?  */
                goto out1;
        }

        /* Make sure it's locked in the right context.  */
        if (_DRM_LOCKING_CONTEXT(master->lock.hw_lock->lock) !=
            lock_request->context) {
                error = -EACCES; /* XXX Right error?  */
                goto out1;
        }

        /* Make sure it's locked by us.  */
        if (master->lock.file_priv != file) {
                error = -EACCES; /* XXX Right error?  */
                goto out1;
        }

        /* Actually release the lock.  */
        drm_lock_release(&master->lock, lock_request->context);

        /* Clear the lock's file pointer, just in case.  */
        master->lock.file_priv = NULL;

        /* Unblock the signals we blocked in drm_lock.  */
        drm_lock_unblock_signals(dev, lock_request, file);

        /* Success!  */
        error = 0;

out1:   spin_unlock(&master->lock.spinlock);
out0:   mutex_lock(&drm_global_mutex);
        return error;
}

/*
 * Drop the lock.
 *
 * Return value is an artefact of Linux.  Caller must guarantee
 * preconditions; failure is fatal.
 *
 * XXX Should we also unblock signals like drm_unlock does?
 */
int
drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{

        spin_lock(&lock_data->spinlock);
        drm_lock_release(lock_data, context);
        spin_unlock(&lock_data->spinlock);

        return 0;
}

/*
 * Try to acquire the lock.  Whether or not we acquire it, guarantee
 * that whoever next releases it relinquishes it to the kernel, not to
 * anyone else.
 */
void
drm_idlelock_take(struct drm_lock_data *lock_data)
{

        spin_lock(&lock_data->spinlock);
        KASSERT(!lock_data->idle_has_lock);
        KASSERT(lock_data->kernel_waiters < UINT32_MAX);
        lock_data->kernel_waiters++;
        /* Try to acquire the lock.  */
        if (drm_lock_acquire(lock_data, DRM_KERNEL_CONTEXT)) {
                lock_data->idle_has_lock = 1;
        } else {
                /*
                 * Recording that there are kernel waiters will prevent
                 * userland from acquiring the lock again when it is
                 * next released.
                 */
        }
        spin_unlock(&lock_data->spinlock);
}

/*
 * Release whatever drm_idlelock_take managed to acquire.
 */
void
drm_idlelock_release(struct drm_lock_data *lock_data)
{

        spin_lock(&lock_data->spinlock);
        KASSERT(0 < lock_data->kernel_waiters);
        if (--lock_data->kernel_waiters == 0) {
                if (lock_data->idle_has_lock) {
                        /* We did acquire it.  Release it.  */
                        drm_lock_release(lock_data, DRM_KERNEL_CONTEXT);
                }
        }
        spin_unlock(&lock_data->spinlock);
}

/*
 * Does this file hold this drm device's hardware lock?
 *
 * Used to decide whether to release the lock when the file is being
 * closed.
 *
 * XXX I don't think this answers correctly in the case that the
 * userland has taken the lock and it is uncontended.  But I don't
 * think we can know what the correct answer is in that case.
 */
int
drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file)
{
        struct drm_lock_data *const lock_data = &file->master->lock;
        int answer = 0;

        /* If this file has never locked anything, then no.  */
        if (file->lock_count == 0)
                return 0;

        spin_lock(&lock_data->spinlock);

        /* If there is no lock, then this file doesn't hold it.  */
        if (lock_data->hw_lock == NULL)
                goto out;

        /* If this lock is not held, then this file doesn't hold it.  */
        if (!_DRM_LOCK_IS_HELD(lock_data->hw_lock->lock))
                goto out;

        /*
         * Otherwise, it boils down to whether this file is the owner
         * or someone else.
         *
         * XXX This is not reliable!  Userland doesn't update this when
         * it takes the lock...
         */
        answer = (file == lock_data->file_priv);

out:    spin_unlock(&lock_data->spinlock);
        return answer;
}

/*
 * Try to acquire the lock.  Return true if successful, false if not.
 *
 * This is hairy because it races with userland, and if userland
 * already holds the lock, we must tell it, by marking it
 * _DRM_LOCK_CONT (contended), that it must call ioctl(DRM_UNLOCK) to
 * release the lock so that we can wake waiters.
 *
 * XXX What happens if the process is interrupted?
 */
static bool
drm_lock_acquire(struct drm_lock_data *lock_data, int context)
{
        volatile unsigned int *const lock = &lock_data->hw_lock->lock;
        unsigned int old, new;

        KASSERT(spin_is_locked(&lock_data->spinlock));

        do {
                old = *lock;
                if (!_DRM_LOCK_IS_HELD(old)) {
                        new = (context | _DRM_LOCK_HELD);
                        if ((0 < lock_data->user_waiters) ||
                            (0 < lock_data->kernel_waiters))
                                new |= _DRM_LOCK_CONT;
                } else if (_DRM_LOCKING_CONTEXT(old) != context) {
                        new = (old | _DRM_LOCK_CONT);
                } else {
                        DRM_ERROR("%d already holds heavyweight lock\n",
                            context);
                        return false;
                }
        } while (atomic_cas_uint(lock, old, new) != old);

        return !_DRM_LOCK_IS_HELD(old);
}
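
/*
 * Illustrative sketch only (not compiled here, and not the libdrm
 * implementation): the userland half of the contention protocol that
 * drm_lock_acquire implements above.  If the kernel has set
 * _DRM_LOCK_CONT on the shared word, userland must release through
 * the DRM_UNLOCK ioctl so the kernel can wake waiters; otherwise a
 * plain compare-and-swap suffices.  The function name and the
 * __sync_* builtin are assumptions for the sake of the example.
 */
#if 0
static int
example_user_unlock(int fd, volatile unsigned int *lock, unsigned int context)
{
        struct drm_lock request = { .context = context, .flags = 0 };
        unsigned int old = *lock;

        if (!(old & _DRM_LOCK_CONT) &&
            __sync_bool_compare_and_swap(lock, (context | _DRM_LOCK_HELD), 0))
                return 0;       /* uncontended: released without a syscall */

        /* Contended (or we raced): let the kernel release it and wake.  */
        return ioctl(fd, DRM_IOCTL_UNLOCK, &request);
}
#endif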

/*
 * Release the lock held in the given context.  Wake any waiters,
 * preferring kernel waiters over userland waiters.
 *
 * Lock's spinlock must be held and lock must be held in this context.
 */
static void
drm_lock_release(struct drm_lock_data *lock_data, int context)
{

        (void)context;          /* ignore */
        KASSERT(spin_is_locked(&lock_data->spinlock));
        KASSERT(_DRM_LOCK_IS_HELD(lock_data->hw_lock->lock));
        KASSERT(_DRM_LOCKING_CONTEXT(lock_data->hw_lock->lock) == context);

        lock_data->hw_lock->lock = 0;
        DRM_SPIN_WAKEUP_ONE(&lock_data->lock_queue, &lock_data->spinlock);
}

/*
 * Block signals for a process that holds a drm lock.
 *
 * XXX It's not processes but files that hold drm locks, so blocking
 * signals in a process seems wrong, and it's not clear that blocking
 * signals automatically is remotely sensible anyway.
 */
static int
drm_lock_block_signals(struct drm_device *dev __unused,
    struct drm_lock *lock_request __unused, struct drm_file *file __unused)
{
        return 0;
}

/*
 * Unblock the signals that drm_lock_block_signals blocked.
 */
static void
drm_lock_unblock_signals(struct drm_device *dev __unused,
    struct drm_lock *lock_request __unused, struct drm_file *file __unused)
{
}