drm_lock.c revision 1.2.6.2 1 1.2.6.2 yamt /* $NetBSD: drm_lock.c,v 1.2.6.2 2014/05/22 11:40:55 yamt Exp $ */
2 1.2.6.2 yamt
3 1.2.6.2 yamt /*-
4 1.2.6.2 yamt * Copyright (c) 2013 The NetBSD Foundation, Inc.
5 1.2.6.2 yamt * All rights reserved.
6 1.2.6.2 yamt *
7 1.2.6.2 yamt * This code is derived from software contributed to The NetBSD Foundation
8 1.2.6.2 yamt * by Taylor R. Campbell.
9 1.2.6.2 yamt *
10 1.2.6.2 yamt * Redistribution and use in source and binary forms, with or without
11 1.2.6.2 yamt * modification, are permitted provided that the following conditions
12 1.2.6.2 yamt * are met:
13 1.2.6.2 yamt * 1. Redistributions of source code must retain the above copyright
14 1.2.6.2 yamt * notice, this list of conditions and the following disclaimer.
15 1.2.6.2 yamt * 2. Redistributions in binary form must reproduce the above copyright
16 1.2.6.2 yamt * notice, this list of conditions and the following disclaimer in the
17 1.2.6.2 yamt * documentation and/or other materials provided with the distribution.
18 1.2.6.2 yamt *
19 1.2.6.2 yamt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.2.6.2 yamt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.2.6.2 yamt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.2.6.2 yamt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.2.6.2 yamt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.2.6.2 yamt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.2.6.2 yamt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.2.6.2 yamt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.2.6.2 yamt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.2.6.2 yamt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.2.6.2 yamt * POSSIBILITY OF SUCH DAMAGE.
30 1.2.6.2 yamt */
31 1.2.6.2 yamt
32 1.2.6.2 yamt /*
33 1.2.6.2 yamt * DRM lock. Each drm master has a heavy-weight lock to provide mutual
34 1.2.6.2 yamt * exclusion for access to the hardware. The lock can be held by the
35 1.2.6.2 yamt * kernel or by a drm file; the kernel takes access only for unusual
36 1.2.6.2 yamt * purposes, with drm_idlelock_take, mainly for idling the GPU when
37 1.2.6.2 yamt * closing down.
38 1.2.6.2 yamt *
39 1.2.6.2 yamt * The physical memory storing the lock state is shared between
40 1.2.6.2 yamt * userland and kernel: the pointer at dev->master->lock->hw_lock is
41 1.2.6.2 yamt * mapped into both userland and kernel address spaces. This way,
42 1.2.6.2 yamt * userland can try to take the hardware lock without a system call,
43 1.2.6.2 yamt * although if it fails then it will use the DRM_LOCK ioctl to block
44 1.2.6.2 yamt * atomically until the lock is available. All this means that the
45 1.2.6.2 yamt * kernel must use atomic_ops to manage the lock state.
46 1.2.6.2 yamt */
47 1.2.6.2 yamt
48 1.2.6.2 yamt #include <sys/cdefs.h>
49 1.2.6.2 yamt __KERNEL_RCSID(0, "$NetBSD: drm_lock.c,v 1.2.6.2 2014/05/22 11:40:55 yamt Exp $");
50 1.2.6.2 yamt
51 1.2.6.2 yamt #include <sys/types.h>
52 1.2.6.2 yamt #include <sys/errno.h>
53 1.2.6.2 yamt #include <sys/systm.h>
54 1.2.6.2 yamt
55 1.2.6.2 yamt #include <drm/drmP.h>
56 1.2.6.2 yamt
57 1.2.6.2 yamt static bool drm_lock_acquire(struct drm_lock_data *, int);
58 1.2.6.2 yamt static void drm_lock_release(struct drm_lock_data *, int);
59 1.2.6.2 yamt static int drm_lock_block_signals(struct drm_device *, struct drm_lock *,
60 1.2.6.2 yamt struct drm_file *);
61 1.2.6.2 yamt static void drm_lock_unblock_signals(struct drm_device *,
62 1.2.6.2 yamt struct drm_lock *, struct drm_file *);
63 1.2.6.2 yamt
/*
 * drm_lock(dev, data, file)
 *
 *	DRM_LOCK ioctl handler: block until this file can take the
 *	master's heavyweight hardware lock in the requested context, or
 *	until the lock state goes away or the wait is interrupted.
 *
 *	Returns 0 on success; negative (Linux-convention) errno on
 *	failure: -EINVAL for the kernel context or for magic bits in
 *	the context, -EBUSY if the user waiter count would overflow,
 *	-ENXIO if the lock state disappeared while waiting, or an
 *	error from the interrupted wait, signal blocking, or the
 *	driver's dma_quiescent hook.
 */
int
drm_lock(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_lock *lock_request = data;
	struct drm_master *master = file->master;
	int error;

	/* Sanitize the drm global mutex bollocks until we get rid of it. */
	KASSERT(mutex_is_locked(&drm_global_mutex));
	mutex_unlock(&drm_global_mutex);

	/* Refuse to lock on behalf of the kernel. */
	if (lock_request->context == DRM_KERNEL_CONTEXT) {
		error = -EINVAL;
		goto out0;
	}

	/* Refuse to set the magic bits (anything _DRM_LOCKING_CONTEXT masks off). */
	if (lock_request->context !=
	    _DRM_LOCKING_CONTEXT(lock_request->context)) {
		error = -EINVAL;
		goto out0;
	}

	/* Count it in the file and device statistics (XXX why here?). */
	file->lock_count++;
	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);

	/* Wait until the hardware lock is gone or we can acquire it. */
	spin_lock(&master->lock.spinlock);

	/* Don't let the user waiter count overflow. */
	if (master->lock.user_waiters == UINT32_MAX) {
		error = -EBUSY;
		goto out1;
	}

	master->lock.user_waiters++;
	/*
	 * Sleep on the lock queue, re-evaluating under the spinlock
	 * each wakeup; drm_lock_acquire also marks the lock contended
	 * so the current holder knows to wake us on unlock.
	 */
	DRM_SPIN_WAIT_UNTIL(error, &master->lock.lock_queue,
	    &master->lock.spinlock,
	    ((master->lock.hw_lock == NULL) ||
		drm_lock_acquire(&master->lock, lock_request->context)));
	KASSERT(0 < master->lock.user_waiters);
	master->lock.user_waiters--;
	if (error)
		goto out1;

	/*
	 * If the lock is gone, give up.  (Presumably hw_lock is
	 * cleared when the master is torn down -- not visible here;
	 * confirm against the master teardown path.)
	 */
	if (master->lock.hw_lock == NULL) {
#if 0				/* XXX Linux sends SIGTERM, but why? */
		mutex_enter(proc_lock);
		psignal(curproc, SIGTERM);
		mutex_exit(proc_lock);
		error = -EINTR;
#else
		error = -ENXIO;
#endif
		goto out1;
	}

	/* Mark the lock as owned by file. */
	master->lock.file_priv = file;
	master->lock.lock_time = jiffies;	/* XXX Unused? */

	/* Block signals while the lock is held. */
	error = drm_lock_block_signals(dev, lock_request, file);
	if (error)
		goto fail2;

	/* Enter the DMA quiescent state if requested and available. */
	/* XXX Drop the spin lock first... */
	if (ISSET(lock_request->flags, _DRM_LOCK_QUIESCENT) &&
	    (dev->driver->dma_quiescent != NULL)) {
		error = (*dev->driver->dma_quiescent)(dev);
		if (error)
			goto fail3;
	}

	/* Success! */
	error = 0;
	goto out1;

	/* Unwind in reverse order on failure after acquiring the lock. */
fail3:	drm_lock_unblock_signals(dev, lock_request, file);
fail2:	drm_lock_release(&master->lock, lock_request->context);
	master->lock.file_priv = NULL;
out1:	spin_unlock(&master->lock.spinlock);
out0:	mutex_lock(&drm_global_mutex);
	return error;
}
155 1.2.6.2 yamt
156 1.2.6.2 yamt /*
157 1.2.6.2 yamt * Try to relinquish a lock that userland thinks it holds, per
158 1.2.6.2 yamt * userland's request. Fail if it doesn't actually hold the lock.
159 1.2.6.2 yamt */
160 1.2.6.2 yamt int
161 1.2.6.2 yamt drm_unlock(struct drm_device *dev, void *data, struct drm_file *file)
162 1.2.6.2 yamt {
163 1.2.6.2 yamt struct drm_lock *lock_request = data;
164 1.2.6.2 yamt struct drm_master *master = file->master;
165 1.2.6.2 yamt int error;
166 1.2.6.2 yamt
167 1.2.6.2 yamt /* Sanitize the drm global mutex bollocks until we get rid of it. */
168 1.2.6.2 yamt KASSERT(mutex_is_locked(&drm_global_mutex));
169 1.2.6.2 yamt mutex_unlock(&drm_global_mutex);
170 1.2.6.2 yamt
171 1.2.6.2 yamt /* Refuse to unlock on behalf of the kernel. */
172 1.2.6.2 yamt if (lock_request->context == DRM_KERNEL_CONTEXT) {
173 1.2.6.2 yamt error = -EINVAL;
174 1.2.6.2 yamt goto out0;
175 1.2.6.2 yamt }
176 1.2.6.2 yamt
177 1.2.6.2 yamt /* Count it in the device statistics. */
178 1.2.6.2 yamt atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
179 1.2.6.2 yamt
180 1.2.6.2 yamt /* Lock the internal spin lock to make changes. */
181 1.2.6.2 yamt spin_lock(&master->lock.spinlock);
182 1.2.6.2 yamt
183 1.2.6.2 yamt /* Make sure it's actually locked. */
184 1.2.6.2 yamt if (!_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock)) {
185 1.2.6.2 yamt error = -EINVAL; /* XXX Right error? */
186 1.2.6.2 yamt goto out1;
187 1.2.6.2 yamt }
188 1.2.6.2 yamt
189 1.2.6.2 yamt /* Make sure it's locked in the right context. */
190 1.2.6.2 yamt if (_DRM_LOCKING_CONTEXT(master->lock.hw_lock->lock) !=
191 1.2.6.2 yamt lock_request->context) {
192 1.2.6.2 yamt error = -EACCES; /* XXX Right error? */
193 1.2.6.2 yamt goto out1;
194 1.2.6.2 yamt }
195 1.2.6.2 yamt
196 1.2.6.2 yamt /* Make sure it's locked by us. */
197 1.2.6.2 yamt if (master->lock.file_priv != file) {
198 1.2.6.2 yamt error = -EACCES; /* XXX Right error? */
199 1.2.6.2 yamt goto out1;
200 1.2.6.2 yamt }
201 1.2.6.2 yamt
202 1.2.6.2 yamt /* Actually release the lock. */
203 1.2.6.2 yamt drm_lock_release(&master->lock, lock_request->context);
204 1.2.6.2 yamt
205 1.2.6.2 yamt /* Clear the lock's file pointer, just in case. */
206 1.2.6.2 yamt master->lock.file_priv = NULL;
207 1.2.6.2 yamt
208 1.2.6.2 yamt /* Unblock the signals we blocked in drm_lock. */
209 1.2.6.2 yamt drm_lock_unblock_signals(dev, lock_request, file);
210 1.2.6.2 yamt
211 1.2.6.2 yamt /* Success! */
212 1.2.6.2 yamt error = 0;
213 1.2.6.2 yamt
214 1.2.6.2 yamt out1: spin_unlock(&master->lock.spinlock);
215 1.2.6.2 yamt out0: mutex_lock(&drm_global_mutex);
216 1.2.6.2 yamt return error;
217 1.2.6.2 yamt }
218 1.2.6.2 yamt
219 1.2.6.2 yamt /*
220 1.2.6.2 yamt * Drop the lock.
221 1.2.6.2 yamt *
222 1.2.6.2 yamt * Return value is an artefact of Linux. Caller must guarantee
223 1.2.6.2 yamt * preconditions; failure is fatal.
224 1.2.6.2 yamt *
225 1.2.6.2 yamt * XXX Should we also unblock signals like drm_unlock does?
226 1.2.6.2 yamt */
227 1.2.6.2 yamt int
228 1.2.6.2 yamt drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
229 1.2.6.2 yamt {
230 1.2.6.2 yamt
231 1.2.6.2 yamt spin_lock(&lock_data->spinlock);
232 1.2.6.2 yamt drm_lock_release(lock_data, context);
233 1.2.6.2 yamt spin_unlock(&lock_data->spinlock);
234 1.2.6.2 yamt
235 1.2.6.2 yamt return 0;
236 1.2.6.2 yamt }
237 1.2.6.2 yamt
/*
 * drm_idlelock_take(lock_data)
 *
 *	Take the lock for the kernel's use.
 *
 *	XXX This is unimplemented because it's not clear that the Linux
 *	code makes sense at all.  Linux's drm_idlelock_take never
 *	blocks, but it doesn't guarantee that the kernel holds the lock
 *	on return!  For now, I'll hope that the code paths relying on
 *	this don't matter yet.  Any caller panics the system.
 */
void
drm_idlelock_take(struct drm_lock_data *lock_data __unused)
{
	KASSERT(mutex_is_locked(&drm_global_mutex));
	panic("drm_idlelock_take is not yet implemented"); /* XXX */
}
252 1.2.6.2 yamt
/*
 * drm_idlelock_release(lock_data)
 *
 *	Release the lock from the kernel.  Unimplemented, like
 *	drm_idlelock_take; any caller panics the system.
 */
void
drm_idlelock_release(struct drm_lock_data *lock_data __unused)
{
	KASSERT(mutex_is_locked(&drm_global_mutex));
	panic("drm_idlelock_release is not yet implemented"); /* XXX */
}
262 1.2.6.2 yamt
263 1.2.6.2 yamt /*
264 1.2.6.2 yamt * Does this file hold this drm device's hardware lock?
265 1.2.6.2 yamt *
266 1.2.6.2 yamt * Used to decide whether to release the lock when the file is being
267 1.2.6.2 yamt * closed.
268 1.2.6.2 yamt *
269 1.2.6.2 yamt * XXX I don't think this answers correctly in the case that the
270 1.2.6.2 yamt * userland has taken the lock and it is uncontended. But I don't
271 1.2.6.2 yamt * think we can know what the correct answer is in that case.
272 1.2.6.2 yamt */
273 1.2.6.2 yamt int
274 1.2.6.2 yamt drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file)
275 1.2.6.2 yamt {
276 1.2.6.2 yamt struct drm_lock_data *const lock_data = &file->master->lock;
277 1.2.6.2 yamt int answer = 0;
278 1.2.6.2 yamt
279 1.2.6.2 yamt /* If this file has never locked anything, then no. */
280 1.2.6.2 yamt if (file->lock_count == 0)
281 1.2.6.2 yamt return 0;
282 1.2.6.2 yamt
283 1.2.6.2 yamt spin_lock(&lock_data->spinlock);
284 1.2.6.2 yamt
285 1.2.6.2 yamt /* If there is no lock, then this file doesn't hold it. */
286 1.2.6.2 yamt if (lock_data->hw_lock == NULL)
287 1.2.6.2 yamt goto out;
288 1.2.6.2 yamt
289 1.2.6.2 yamt /* If this lock is not held, then this file doesn't hold it. */
290 1.2.6.2 yamt if (!_DRM_LOCK_IS_HELD(lock_data->hw_lock->lock))
291 1.2.6.2 yamt goto out;
292 1.2.6.2 yamt
293 1.2.6.2 yamt /*
294 1.2.6.2 yamt * Otherwise, it boils down to whether this file is the owner
295 1.2.6.2 yamt * or someone else.
296 1.2.6.2 yamt *
297 1.2.6.2 yamt * XXX This is not reliable! Userland doesn't update this when
298 1.2.6.2 yamt * it takes the lock...
299 1.2.6.2 yamt */
300 1.2.6.2 yamt answer = (file == lock_data->file_priv);
301 1.2.6.2 yamt
302 1.2.6.2 yamt out: spin_unlock(&lock_data->spinlock);
303 1.2.6.2 yamt return answer;
304 1.2.6.2 yamt }
305 1.2.6.2 yamt
306 1.2.6.2 yamt /*
307 1.2.6.2 yamt * Try to acquire the lock. Return true if successful, false if not.
308 1.2.6.2 yamt *
309 1.2.6.2 yamt * This is hairy because it races with userland, and if userland
310 1.2.6.2 yamt * already holds the lock, we must tell it, by marking it
311 1.2.6.2 yamt * _DRM_LOCK_CONT (contended), that it must call ioctl(DRM_UNLOCK) to
312 1.2.6.2 yamt * release the lock so that we can wake waiters.
313 1.2.6.2 yamt *
314 1.2.6.2 yamt * XXX What happens if the process is interrupted?
315 1.2.6.2 yamt */
316 1.2.6.2 yamt static bool
317 1.2.6.2 yamt drm_lock_acquire(struct drm_lock_data *lock_data, int context)
318 1.2.6.2 yamt {
319 1.2.6.2 yamt volatile unsigned int *const lock = &lock_data->hw_lock->lock;
320 1.2.6.2 yamt unsigned int old, new;
321 1.2.6.2 yamt
322 1.2.6.2 yamt KASSERT(spin_is_locked(&lock_data->spinlock));
323 1.2.6.2 yamt
324 1.2.6.2 yamt do {
325 1.2.6.2 yamt old = *lock;
326 1.2.6.2 yamt if (!_DRM_LOCK_IS_HELD(old)) {
327 1.2.6.2 yamt new = (context | _DRM_LOCK_HELD);
328 1.2.6.2 yamt if ((0 < lock_data->user_waiters) ||
329 1.2.6.2 yamt (0 < lock_data->kernel_waiters))
330 1.2.6.2 yamt new |= _DRM_LOCK_CONT;
331 1.2.6.2 yamt } else if (_DRM_LOCKING_CONTEXT(old) != context) {
332 1.2.6.2 yamt new = (old | _DRM_LOCK_CONT);
333 1.2.6.2 yamt } else {
334 1.2.6.2 yamt DRM_ERROR("%d already holds heavyweight lock\n",
335 1.2.6.2 yamt context);
336 1.2.6.2 yamt return false;
337 1.2.6.2 yamt }
338 1.2.6.2 yamt } while (atomic_cas_uint(lock, old, new) != old);
339 1.2.6.2 yamt
340 1.2.6.2 yamt return !_DRM_LOCK_IS_HELD(old);
341 1.2.6.2 yamt }
342 1.2.6.2 yamt
/*
 * drm_lock_release(lock_data, context)
 *
 *	Release the lock held in the given context and wake a waiter.
 *
 *	NOTE(review): this wakes exactly ONE waiter
 *	(DRM_SPIN_WAKEUP_ONE), and nothing here visibly prefers kernel
 *	waiters over userland waiters -- confirm against the wait-queue
 *	implementation before relying on any such preference.
 *
 *	Lock's spinlock must be held and lock must be held in this
 *	context.
 */
static void
drm_lock_release(struct drm_lock_data *lock_data, int context)
{

	(void)context;		/* used only by the KASSERTs below */
	KASSERT(spin_is_locked(&lock_data->spinlock));
	KASSERT(_DRM_LOCK_IS_HELD(lock_data->hw_lock->lock));
	KASSERT(_DRM_LOCKING_CONTEXT(lock_data->hw_lock->lock) == context);

	/* Clear the whole lock word: context, HELD, and CONT bits. */
	lock_data->hw_lock->lock = 0;
	DRM_SPIN_WAKEUP_ONE(&lock_data->lock_queue, &lock_data->spinlock);
}
361 1.2.6.2 yamt
/*
 * drm_lock_block_signals(dev, lock_request, file)
 *
 *	Block signals for a process that holds a drm lock.
 *	Deliberately a no-op on NetBSD; always succeeds.
 *
 *	XXX It's not processes but files that hold drm locks, so
 *	blocking signals in a process seems wrong, and it's not clear
 *	that blocking signals automatically is remotely sensible
 *	anyway.
 */
static int
drm_lock_block_signals(struct drm_device *dev __unused,
    struct drm_lock *lock_request __unused, struct drm_file *file __unused)
{
	return 0;
}
375 1.2.6.2 yamt
/*
 * drm_lock_unblock_signals(dev, lock_request, file)
 *
 *	Unblock the signals that drm_lock_block_signals blocked.
 *	Deliberately a no-op on NetBSD, matching its counterpart.
 */
static void
drm_lock_unblock_signals(struct drm_device *dev __unused,
    struct drm_lock *lock_request __unused, struct drm_file *file __unused)
{
}
384