/*	$NetBSD: drm_cdevsw.c,v 1.23 2021/12/19 09:52:00 riastradh Exp $	*/
2
3 /*-
4 * Copyright (c) 2013 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: drm_cdevsw.c,v 1.23 2021/12/19 09:52:00 riastradh Exp $");
34
35 #include <sys/param.h>
36 #include <sys/types.h>
37 #include <sys/conf.h>
38 #include <sys/device.h>
39 #include <sys/file.h>
40 #include <sys/filedesc.h>
41 #include <sys/ioccom.h>
42 #include <sys/kauth.h>
43 #ifndef _MODULE
44 /* XXX Mega-kludge because modules are broken. */
45 #include <sys/once.h>
46 #endif
47 #include <sys/pmf.h>
48 #include <sys/poll.h>
49 #ifndef _MODULE
50 #include <sys/reboot.h> /* XXX drm_init kludge */
51 #endif
52 #include <sys/select.h>
53
54 #include <uvm/uvm_extern.h>
55
56 #include <linux/err.h>
57
58 #include <linux/pm.h>
59
60 #include <drm/drmP.h>
61 #include <drm/drm_drv.h>
62 #include <drm/drm_file.h>
63 #include <drm/drm_irq.h>
64 #include <drm/drm_legacy.h>
65
66 #include "../dist/drm/drm_internal.h"
67 #include "../dist/drm/drm_legacy.h"
68
/* Character-device entry point -- only open is implemented here.  */
static dev_type_open(drm_open);

/* Legacy per-device setup run on the 0->1 open transition.  */
static int drm_firstopen(struct drm_device *);

/* fileops methods backing the descriptors cloned by drm_open.  */
static int drm_close(struct file *);
static int drm_read(struct file *, off_t *, struct uio *, kauth_cred_t,
    int);
static int drm_dequeue_event(struct drm_file *, size_t,
    struct drm_pending_event **, int);
static int drm_ioctl_shim(struct file *, unsigned long, void *);
static int drm_poll(struct file *, int);
static int drm_kqfilter(struct file *, struct knote *);
static int drm_stat(struct file *, struct stat *);
static int drm_fop_mmap(struct file *, off_t *, size_t, int, int *, int *,
    struct uvm_object **, int *);
static void drm_requeue_event(struct drm_file *, struct drm_pending_event *);

/* Legacy cdev d_mmap hook for pre-fileops mappings.  */
static paddr_t drm_legacy_mmap(dev_t, off_t, int);
87
/*
 * Character device switch for drm devices.  Only d_open does real
 * work: it clones a file backed by drm_fileops, so the remaining
 * operations are stubs -- except d_mmap, which services legacy
 * mappings via drm_legacy_mmap_paddr.
 */
const struct cdevsw drm_cdevsw = {
	.d_open = drm_open,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = drm_legacy_mmap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	/* XXX was D_TTY | D_NEGOFFSAFE */
	/* XXX Add D_MPSAFE some day... */
	.d_flag = D_NEGOFFSAFE,
};
104
/*
 * fileops for the descriptors handed out by drm_open.  Writing is
 * always rejected; fcntl and restart are no-ops.
 */
static const struct fileops drm_fileops = {
	.fo_name = "drm",
	.fo_read = drm_read,
	.fo_write = fbadop_write,
	.fo_ioctl = drm_ioctl_shim,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = drm_poll,
	.fo_stat = drm_stat,
	.fo_close = drm_close,
	.fo_kqfilter = drm_kqfilter,
	.fo_restart = fnullop_restart,
	.fo_mmap = drm_fop_mmap,
};
118
/*
 * drm_open(d, flags, fmt, l)
 *
 *	Character-device open.  Acquires the drm minor for device
 *	number d, bumps the device's open count (running drm_firstopen
 *	on the 0->1 transition), and hands the caller a cloned file
 *	descriptor backed by drm_fileops rather than by the cdev.
 *	Returns EMOVEFD on success, per the fd_clone protocol;
 *	otherwise a (positive) NetBSD errno.
 */
static int
drm_open(dev_t d, int flags, int fmt, struct lwp *l)
{
	struct drm_minor *dminor;
	struct drm_device *dev;
	bool firstopen, lastclose;
	int fd;
	struct file *fp;
	int error;

	error = drm_guarantee_initialized();
	if (error)
		goto fail0;

	/* Exclusive open is not supported.  */
	if (flags & O_EXCL) {
		error = EBUSY;
		goto fail0;
	}

	dminor = drm_minor_acquire(minor(d));
	if (IS_ERR(dminor)) {
		/* XXX errno Linux->NetBSD */
		error = -PTR_ERR(dminor);
		goto fail0;
	}
	dev = dminor->dev;
	/* Refuse to open a device that is not powered on.  */
	if (dev->switch_power_state != DRM_SWITCH_POWER_ON) {
		error = EINVAL;
		goto fail1;
	}

	/* Count the open under the global lock; guard against overflow.  */
	mutex_lock(&drm_global_mutex);
	if (dev->open_count == INT_MAX) {
		mutex_unlock(&drm_global_mutex);
		error = EBUSY;
		goto fail1;
	}
	firstopen = (dev->open_count == 0);
	dev->open_count++;
	mutex_unlock(&drm_global_mutex);

	if (firstopen) {
		/* XXX errno Linux->NetBSD */
		error = -drm_firstopen(dev);
		if (error)
			goto fail2;
	}

	error = fd_allocfile(&fp, &fd);
	if (error)
		goto fail2;

	struct drm_file *const file = kmem_zalloc(sizeof(*file), KM_SLEEP);
	/* XXX errno Linux->NetBSD */
	error = -drm_open_file(file, fp, dminor);
	if (error)
		goto fail3;

	/* Transfer fp/file to the caller's file table.  */
	error = fd_clone(fp, fd, flags, &drm_fileops, file);
	KASSERT(error == EMOVEFD); /* XXX */

	/* Success! (But error has to be EMOVEFD, not 0.) */
	return error;

	/* Unwind in reverse order of the setup above.  */
fail3: kmem_free(file, sizeof(*file));
	fd_abort(curproc, fp, fd);
fail2: mutex_lock(&drm_global_mutex);
	KASSERT(0 < dev->open_count);
	--dev->open_count;
	lastclose = (dev->open_count == 0);
	mutex_unlock(&drm_global_mutex);
	if (lastclose)
		drm_lastclose(dev);
fail1: drm_minor_release(dminor);
fail0: KASSERT(error);
	if (error == ERESTARTSYS)
		error = ERESTART;
	return error;
}
198
/*
 * drm_close(fp)
 *
 *	fileops close.  Undoes drm_open: closes and frees the drm
 *	file, drops the device's open count (running drm_lastclose on
 *	the 1->0 transition), and releases the minor.  Always returns
 *	zero.
 */
static int
drm_close(struct file *fp)
{
	struct drm_file *const file = fp->f_data;
	struct drm_minor *const dminor = file->minor;
	struct drm_device *const dev = dminor->dev;
	bool lastclose;

	drm_close_file(file);
	kmem_free(file, sizeof(*file));

	mutex_lock(&drm_global_mutex);
	KASSERT(0 < dev->open_count);
	--dev->open_count;
	lastclose = (dev->open_count == 0);
	mutex_unlock(&drm_global_mutex);

	/* Run lastclose unlocked, as drm_open's error path does.  */
	if (lastclose)
		drm_lastclose(dev);

	drm_minor_release(dminor);

	return 0;
}
223
/*
 * drm_firstopen(dev)
 *
 *	Legacy per-device setup run on the 0->1 open transition: the
 *	driver's firstopen hook, then legacy DMA setup.  A no-op for
 *	modesetting (DRIVER_MODESET) drivers.  Returns zero or a
 *	Linux-style negative errno.
 */
static int
drm_firstopen(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (dev->driver->firstopen) {
		ret = (*dev->driver->firstopen)(dev);
		if (ret)
			goto fail0;
	}

	ret = drm_legacy_dma_setup(dev);
	if (ret)
		goto fail1;

	return 0;

	/*
	 * NOTE(review): nothing in this function jumps to fail2, so
	 * this unwind step is dead code (hence the __unused to quiet
	 * the compiler) -- presumably kept for symmetry with the
	 * setup sequence above; confirm before removing.
	 */
fail2: __unused
#if IS_ENABLED(CONFIG_DRM_LEGACY)
	drm_legacy_dma_takedown(dev);
#endif
fail1: if (dev->driver->lastclose)
		(*dev->driver->lastclose)(dev);
fail0: KASSERT(ret);
	return ret;
}
253
/*
 * drm_lastclose(dev)
 *
 *	Per-device teardown run when the last open file goes away (or
 *	when drm_open backs out a failed open): driver lastclose hook,
 *	legacy irq uninstall, then agp/sg/dma cleanup and legacy state
 *	reset for non-modesetting drivers.
 */
void
drm_lastclose(struct drm_device *dev)
{

	/* XXX Order is sketchy here... */
	if (dev->driver->lastclose)
		(*dev->driver->lastclose)(dev);
	/* Modesetting drivers manage their own interrupt lifetime.  */
	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	if (dev->agp)
		drm_legacy_agp_clear(dev);
#if IS_ENABLED(CONFIG_DRM_LEGACY)
	drm_legacy_sg_cleanup(dev);
	drm_legacy_dma_takedown(dev);
#endif
	mutex_unlock(&dev->struct_mutex);

	/* XXX Synchronize with drm_legacy_dev_reinit. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
#if IS_ENABLED(CONFIG_DRM_LEGACY)
		dev->sigdata.lock = NULL;
		dev->context_flag = 0;
		dev->last_context = 0;
#endif
		dev->if_version = 0;
	}
}
283
284 static int
285 drm_read(struct file *fp, off_t *off, struct uio *uio, kauth_cred_t cred,
286 int flags)
287 {
288 struct drm_file *const file = fp->f_data;
289 struct drm_device *const dev = file->minor->dev;
290 struct drm_pending_event *event;
291 bool first;
292 int ret = 0;
293
294 /*
295 * Only one event reader at a time, so that if copyout faults
296 * after dequeueing one event and we have to put the event
297 * back, another reader won't see out-of-order events.
298 */
299 spin_lock(&dev->event_lock);
300 DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &file->event_read_wq, &dev->event_lock,
301 file->event_read_lock == NULL);
302 if (ret) {
303 spin_unlock(&dev->event_lock);
304 /* XXX errno Linux->NetBSD */
305 return -ret;
306 }
307 file->event_read_lock = curlwp;
308 spin_unlock(&dev->event_lock);
309
310 for (first = true; ; first = false) {
311 int f = 0;
312 off_t offset;
313 size_t resid;
314
315 if (!first || ISSET(fp->f_flag, FNONBLOCK))
316 f |= FNONBLOCK;
317
318 ret = drm_dequeue_event(file, uio->uio_resid, &event, f);
319 if (ret) {
320 if ((ret == -EWOULDBLOCK) && !first)
321 ret = 0;
322 break;
323 }
324 if (event == NULL)
325 break;
326
327 offset = uio->uio_offset;
328 resid = uio->uio_resid;
329 /* XXX errno NetBSD->Linux */
330 ret = -uiomove(event->event, event->event->length, uio);
331 if (ret) {
332 /*
333 * Faulted on copyout. Put the event back and
334 * stop here.
335 */
336 if (!first) {
337 /*
338 * Already transferred some events.
339 * Rather than back them all out, just
340 * say we succeeded at returning those.
341 */
342 ret = 0;
343 }
344 uio->uio_offset = offset;
345 uio->uio_resid = resid;
346 drm_requeue_event(file, event);
347 break;
348 }
349 kfree(event);
350 }
351
352 /* Release the event read lock. */
353 spin_lock(&dev->event_lock);
354 KASSERT(file->event_read_lock == curlwp);
355 file->event_read_lock = NULL;
356 DRM_SPIN_WAKEUP_ONE(&file->event_read_wq, &dev->event_lock);
357 spin_unlock(&dev->event_lock);
358
359 /* XXX errno Linux->NetBSD */
360
361 /* Success! */
362 if (ret == ERESTARTSYS)
363 ret = ERESTART;
364 return -ret;
365 }
366
/*
 * drm_dequeue_event(file, max_length, eventp, flags)
 *
 *	Remove the first pending event from file's queue, store it in
 *	*eventp, and credit its length back to file->event_space.
 *	With FNONBLOCK set in flags, fail with -EWOULDBLOCK rather
 *	than sleeping for an event to arrive.  If the first event is
 *	larger than max_length, succeed but store NULL in *eventp,
 *	leaving the event queued.  Returns zero or a Linux-style
 *	negative errno.
 */
static int
drm_dequeue_event(struct drm_file *file, size_t max_length,
    struct drm_pending_event **eventp, int flags)
{
	struct drm_device *const dev = file->minor->dev;
	struct drm_pending_event *event = NULL;
	unsigned long irqflags;
	int ret = 0;

	spin_lock_irqsave(&dev->event_lock, irqflags);

	if (ISSET(flags, FNONBLOCK)) {
		if (list_empty(&file->event_list))
			ret = -EWOULDBLOCK;
	} else {
		/* Interruptible wait for the queue to become nonempty.  */
		DRM_SPIN_WAIT_UNTIL(ret, &file->event_wait, &dev->event_lock,
		    !list_empty(&file->event_list));
	}
	if (ret)
		goto out;

	event = list_first_entry(&file->event_list, struct drm_pending_event,
	    link);
	if (event->event->length > max_length) {
		/* Event is too large, can't return it. */
		event = NULL;
		ret = 0;
		goto out;
	}

	/* Unlink it and return its space to the file's event budget.  */
	file->event_space += event->event->length;
	list_del(&event->link);

out:	spin_unlock_irqrestore(&dev->event_lock, irqflags);
	*eventp = event;
	return ret;
}
404
405 static void
406 drm_requeue_event(struct drm_file *file, struct drm_pending_event *event)
407 {
408 struct drm_device *const dev = file->minor->dev;
409 unsigned long irqflags;
410
411 spin_lock_irqsave(&dev->event_lock, irqflags);
412 list_add(&event->link, &file->event_list);
413 KASSERT(file->event_space >= event->event->length);
414 file->event_space -= event->event->length;
415 spin_unlock_irqrestore(&dev->event_lock, irqflags);
416 }
417
418 static int
419 drm_ioctl_shim(struct file *fp, unsigned long cmd, void *data)
420 {
421 struct drm_file *file = fp->f_data;
422 struct drm_driver *driver = file->minor->dev->driver;
423 int error;
424
425 if (driver->ioctl_override)
426 error = driver->ioctl_override(fp, cmd, data);
427 else
428 error = drm_ioctl(fp, cmd, data);
429 if (error == ERESTARTSYS)
430 error = ERESTART;
431
432 return error;
433 }
434
435 static int
436 drm_poll(struct file *fp __unused, int events __unused)
437 {
438 struct drm_file *const file = fp->f_data;
439 struct drm_device *const dev = file->minor->dev;
440 int revents = 0;
441 unsigned long irqflags;
442
443 if (!ISSET(events, (POLLIN | POLLRDNORM)))
444 return 0;
445
446 spin_lock_irqsave(&dev->event_lock, irqflags);
447 if (list_empty(&file->event_list))
448 selrecord(curlwp, &file->event_selq);
449 else
450 revents |= (events & (POLLIN | POLLRDNORM));
451 spin_unlock_irqrestore(&dev->event_lock, irqflags);
452
453 return revents;
454 }
455
static void filt_drm_detach(struct knote *);
static int filt_drm_event(struct knote *, long);

/*
 * kqueue filter for EVFILT_READ on drm files.  Attach is done
 * directly in drm_kqfilter, so f_attach is NULL here.
 */
static const struct filterops drm_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_drm_detach,
	.f_event = filt_drm_event,
};
465
466 static int
467 drm_kqfilter(struct file *fp, struct knote *kn)
468 {
469 struct drm_file *const file = fp->f_data;
470 struct drm_device *const dev = file->minor->dev;
471 unsigned long irqflags;
472
473 switch (kn->kn_filter) {
474 case EVFILT_READ:
475 kn->kn_fop = &drm_filtops;
476 kn->kn_hook = file;
477 spin_lock_irqsave(&dev->event_lock, irqflags);
478 selrecord_knote(&file->event_selq, kn);
479 spin_unlock_irqrestore(&dev->event_lock, irqflags);
480 return 0;
481 case EVFILT_WRITE:
482 default:
483 return EINVAL;
484 }
485 }
486
487 static void
488 filt_drm_detach(struct knote *kn)
489 {
490 struct drm_file *const file = kn->kn_hook;
491 struct drm_device *const dev = file->minor->dev;
492 unsigned long irqflags;
493
494 spin_lock_irqsave(&dev->event_lock, irqflags);
495 selremove_knote(&file->event_selq, kn);
496 spin_unlock_irqrestore(&dev->event_lock, irqflags);
497 }
498
/*
 * filt_drm_event(kn, hint)
 *
 *	kqueue event callback: returns true, with kn_data set to the
 *	first pending event's length, iff the file's event queue is
 *	nonempty.  When hint is NOTE_SUBMIT the caller already holds
 *	dev->event_lock (asserted below); otherwise take and release
 *	it here.
 */
static int
filt_drm_event(struct knote *kn, long hint)
{
	struct drm_file *const file = kn->kn_hook;
	struct drm_device *const dev = file->minor->dev;
	unsigned long irqflags;	/* initialized only on the locking path */
	int ret;

	if (hint == NOTE_SUBMIT)
		KASSERT(spin_is_locked(&dev->event_lock));
	else
		spin_lock_irqsave(&dev->event_lock, irqflags);
	if (list_empty(&file->event_list)) {
		ret = 0;
	} else {
		struct drm_pending_event *const event =
		    list_first_entry(&file->event_list,
			struct drm_pending_event, link);
		kn->kn_data = event->event->length;
		ret = 1;
	}
	if (hint == NOTE_SUBMIT)
		KASSERT(spin_is_locked(&dev->event_lock));
	else
		spin_unlock_irqrestore(&dev->event_lock, irqflags);

	return ret;
}
527
528 static int
529 drm_stat(struct file *fp, struct stat *st)
530 {
531 struct drm_file *const file = fp->f_data;
532 struct drm_minor *const dminor = file->minor;
533 const dev_t devno = makedev(cdevsw_lookup_major(&drm_cdevsw),
534 64*dminor->type + dminor->index);
535
536 (void)memset(st, 0, sizeof(*st));
537
538 st->st_dev = devno;
539 st->st_ino = 0; /* XXX (dev,ino) uniqueness bleh */
540 st->st_uid = kauth_cred_geteuid(fp->f_cred);
541 st->st_gid = kauth_cred_getegid(fp->f_cred);
542 st->st_mode = S_IFCHR; /* XXX what? */
543 st->st_rdev = devno;
544 /* XXX what else? */
545
546 return 0;
547 }
548
549 static int
550 drm_fop_mmap(struct file *fp, off_t *offp, size_t len, int prot, int *flagsp,
551 int *advicep, struct uvm_object **uobjp, int *maxprotp)
552 {
553 struct drm_file *const file = fp->f_data;
554 struct drm_device *const dev = file->minor->dev;
555 int error;
556
557 KASSERT(fp == file->filp);
558 /* XXX errno Linux->NetBSD */
559 error = -(*dev->driver->mmap_object)(dev, *offp, len, prot, uobjp,
560 offp, file->filp);
561 *maxprotp = prot;
562 *advicep = UVM_ADV_RANDOM;
563 if (error == ERESTARTSYS)
564 error = ERESTART;
565 return error;
566 }
567
568 static paddr_t
569 drm_legacy_mmap(dev_t d, off_t offset, int prot)
570 {
571 struct drm_minor *dminor;
572 paddr_t paddr;
573
574 dminor = drm_minor_acquire(minor(d));
575 if (IS_ERR(dminor))
576 return (paddr_t)-1;
577
578 paddr = drm_legacy_mmap_paddr(dminor->dev, offset, prot);
579
580 drm_minor_release(dminor);
581 return paddr;
582 }
583