/*	$NetBSD: drm_cdevsw.c,v 1.26 2021/12/19 10:45:33 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_cdevsw.c,v 1.26 2021/12/19 10:45:33 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/ioccom.h>
#include <sys/kauth.h>
#ifndef _MODULE
/* XXX Mega-kludge because modules are broken. */
#include <sys/once.h>
#endif
#include <sys/pmf.h>
#include <sys/poll.h>
#ifndef _MODULE
#include <sys/reboot.h>		/* XXX drm_init kludge */
#endif
#include <sys/select.h>

#include <uvm/uvm_extern.h>

#include <linux/err.h>

#include <linux/pm.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_irq.h>
#include <drm/drm_legacy.h>

#include "../dist/drm/drm_internal.h"
#include "../dist/drm/drm_legacy.h"

static dev_type_open(drm_open);

static int	drm_close(struct file *);
static int	drm_read(struct file *, off_t *, struct uio *, kauth_cred_t,
		    int);
static int	drm_dequeue_event(struct drm_file *, size_t,
		    struct drm_pending_event **, int);
static int	drm_ioctl_shim(struct file *, unsigned long, void *);
static int	drm_poll(struct file *, int);
static int	drm_kqfilter(struct file *, struct knote *);
static int	drm_stat(struct file *, struct stat *);
static int	drm_fop_mmap(struct file *, off_t *, size_t, int, int *, int *,
		    struct uvm_object **, int *);
static void	drm_requeue_event(struct drm_file *, struct drm_pending_event *);

static paddr_t	drm_legacy_mmap(dev_t, off_t, int);

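/*
 * drm_cdevsw
 *
 *	Character device switch for DRM device nodes.  Only open and the
 *	legacy mmap entry point live here; everything else goes through
 *	the file operations table below, which drm_open installs via
 *	fd_clone.
 */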
const struct cdevsw drm_cdevsw = {
	.d_open = drm_open,
	.d_close = noclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = noioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = drm_legacy_mmap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	/* XXX was D_TTY | D_NEGOFFSAFE */
	/* XXX Add D_MPSAFE some day... */
	.d_flag = D_NEGOFFSAFE,
};

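/*
 * drm_fileops
 *
 *	File operations installed by fd_clone in drm_open.  Each open
 *	file carries its struct drm_file in f_data.
 */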
static const struct fileops drm_fileops = {
	.fo_name = "drm",
	.fo_read = drm_read,
	.fo_write = fbadop_write,
	.fo_ioctl = drm_ioctl_shim,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = drm_poll,
	.fo_stat = drm_stat,
	.fo_close = drm_close,
	.fo_kqfilter = drm_kqfilter,
	.fo_restart = fnullop_restart,
	.fo_mmap = drm_fop_mmap,
};

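/*
 * drm_open(d, flags, fmt, l)
 *
 *	Open the DRM minor named by d: bump the device's open count,
 *	allocate a struct drm_file, and clone a file descriptor with
 *	drm_fileops attached.  On success this returns EMOVEFD, as
 *	fd_clone requires, rather than 0.
 */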
static int
drm_open(dev_t d, int flags, int fmt, struct lwp *l)
{
	struct drm_minor *dminor;
	struct drm_device *dev;
	bool lastclose;
	int fd;
	struct file *fp;
	struct drm_file *priv;
	int need_setup = 0;
	int error;

	error = drm_guarantee_initialized();
	if (error)
		goto fail0;

	/* Synchronize with drm_file.c, drm_open and drm_open_helper. */

	if (flags & O_EXCL) {
		error = EBUSY;
		goto fail0;
	}

	dminor = drm_minor_acquire(minor(d));
	if (IS_ERR(dminor)) {
		/* XXX errno Linux->NetBSD */
		error = -PTR_ERR(dminor);
		goto fail0;
	}
	dev = dminor->dev;
	if (dev->switch_power_state != DRM_SWITCH_POWER_ON) {
		error = EINVAL;
		goto fail1;
	}

	mutex_lock(&drm_global_mutex);
	if (dev->open_count == INT_MAX) {
		mutex_unlock(&drm_global_mutex);
		error = EBUSY;
		goto fail1;
	}
	if (dev->open_count++ == 0)
		need_setup = 1;
	mutex_unlock(&drm_global_mutex);

	error = fd_allocfile(&fp, &fd);
	if (error)
		goto fail2;

	priv = drm_file_alloc(dminor);
	if (IS_ERR(priv)) {
		/* XXX errno Linux->NetBSD */
		error = -PTR_ERR(priv);
		goto fail3;
	}

	if (drm_is_primary_client(priv)) {
		/* XXX errno Linux->NetBSD */
		error = -drm_master_open(priv);
		if (error)
			goto fail4;
	}

	mutex_lock(&dev->filelist_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->filelist_mutex);
	/* XXX Alpha hose? */

	if (need_setup) {
		/* XXX errno Linux->NetBSD */
		error = -drm_legacy_setup(dev);
		if (error)
			goto fail5;
	}

	error = fd_clone(fp, fd, flags, &drm_fileops, priv);
	KASSERT(error == EMOVEFD); /* XXX */

	/* Success! (But error has to be EMOVEFD, not 0.) */
	return error;

fail5:	mutex_lock(&dev->filelist_mutex);
	list_del(&priv->lhead);
	mutex_unlock(&dev->filelist_mutex);
fail4:	drm_file_free(priv);
fail3:	fd_abort(curproc, fp, fd);
fail2:	mutex_lock(&drm_global_mutex);
	KASSERT(0 < dev->open_count);
	--dev->open_count;
	lastclose = (dev->open_count == 0);
	mutex_unlock(&drm_global_mutex);
	if (lastclose)
		drm_lastclose(dev);
fail1:	drm_minor_release(dminor);
fail0:	KASSERT(error);
	if (error == ERESTARTSYS)
		error = ERESTART;
	return error;
}

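/*
 * drm_close(fp)
 *
 *	Release the struct drm_file hanging off fp, drop the device's
 *	open count, and run drm_lastclose on the last close.
 */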
static int
drm_close(struct file *fp)
{
	struct drm_file *const priv = fp->f_data;
	struct drm_minor *const dminor = priv->minor;
	struct drm_device *const dev = dminor->dev;
	bool lastclose;

	/* Synchronize with drm_file.c, drm_release. */

	mutex_lock(&dev->filelist_mutex);
	list_del(&priv->lhead);
	mutex_unlock(&dev->filelist_mutex);

	drm_file_free(priv);

	mutex_lock(&drm_global_mutex);
	KASSERT(0 < dev->open_count);
	--dev->open_count;
	lastclose = (dev->open_count == 0);
	mutex_unlock(&drm_global_mutex);

	if (lastclose)
		drm_lastclose(dev);

	drm_minor_release(dminor);

	return 0;
}

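/*
 * drm_read(fp, off, uio, cred, flags)
 *
 *	Read pending DRM events into uio.  Blocks for the first event
 *	unless the file is nonblocking; once something has been
 *	transferred, returns what we have rather than blocking again,
 *	even if a later copyout faults.
 */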
static int
drm_read(struct file *fp, off_t *off, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	struct drm_file *const file = fp->f_data;
	struct drm_device *const dev = file->minor->dev;
	struct drm_pending_event *event;
	bool first;
	int ret = 0;

	/*
	 * Only one event reader at a time, so that if copyout faults
	 * after dequeueing one event and we have to put the event
	 * back, another reader won't see out-of-order events.
	 */
	spin_lock(&dev->event_lock);
	DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &file->event_read_wq, &dev->event_lock,
	    file->event_read_lock == NULL);
	if (ret) {
		spin_unlock(&dev->event_lock);
		/* XXX errno Linux->NetBSD */
		return -ret;
	}
	file->event_read_lock = curlwp;
	spin_unlock(&dev->event_lock);

	for (first = true; ; first = false) {
		int f = 0;
		off_t offset;
		size_t resid;

		if (!first || ISSET(fp->f_flag, FNONBLOCK))
			f |= FNONBLOCK;

		ret = drm_dequeue_event(file, uio->uio_resid, &event, f);
		if (ret) {
			if ((ret == -EWOULDBLOCK) && !first)
				ret = 0;
			break;
		}
		if (event == NULL)
			break;

		offset = uio->uio_offset;
		resid = uio->uio_resid;
		/* XXX errno NetBSD->Linux */
		ret = -uiomove(event->event, event->event->length, uio);
		if (ret) {
			/*
			 * Faulted on copyout.  Put the event back and
			 * stop here.
			 */
			if (!first) {
				/*
				 * Already transferred some events.
				 * Rather than back them all out, just
				 * say we succeeded at returning those.
				 */
				ret = 0;
			}
			uio->uio_offset = offset;
			uio->uio_resid = resid;
			drm_requeue_event(file, event);
			break;
		}
		kfree(event);
	}

	/* Release the event read lock. */
	spin_lock(&dev->event_lock);
	KASSERT(file->event_read_lock == curlwp);
	file->event_read_lock = NULL;
	DRM_SPIN_WAKEUP_ONE(&file->event_read_wq, &dev->event_lock);
	spin_unlock(&dev->event_lock);

	/* XXX errno Linux->NetBSD */

	/* Success! */
	if (ret == ERESTARTSYS)
		ret = ERESTART;
	return -ret;
}

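/*
 * drm_dequeue_event(file, max_length, eventp, flags)
 *
 *	Remove the first pending event from file's event queue and
 *	return it in *eventp, or set *eventp to NULL if the first event
 *	is larger than max_length.  If flags has FNONBLOCK and no event
 *	is pending, fail with -EWOULDBLOCK instead of waiting.
 */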
static int
drm_dequeue_event(struct drm_file *file, size_t max_length,
    struct drm_pending_event **eventp, int flags)
{
	struct drm_device *const dev = file->minor->dev;
	struct drm_pending_event *event = NULL;
	unsigned long irqflags;
	int ret = 0;

	spin_lock_irqsave(&dev->event_lock, irqflags);

	if (ISSET(flags, FNONBLOCK)) {
		if (list_empty(&file->event_list))
			ret = -EWOULDBLOCK;
	} else {
		DRM_SPIN_WAIT_UNTIL(ret, &file->event_wait, &dev->event_lock,
		    !list_empty(&file->event_list));
	}
	if (ret)
		goto out;

	event = list_first_entry(&file->event_list, struct drm_pending_event,
	    link);
	if (event->event->length > max_length) {
		/* Event is too large, can't return it. */
		event = NULL;
		ret = 0;
		goto out;
	}

	file->event_space += event->event->length;
	list_del(&event->link);

out:	spin_unlock_irqrestore(&dev->event_lock, irqflags);
	*eventp = event;
	return ret;
}

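/*
 * drm_requeue_event(file, event)
 *
 *	Put event back at the head of file's event queue and reclaim
 *	the event space that drm_dequeue_event released, after a
 *	faulted copyout in drm_read.
 */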
static void
drm_requeue_event(struct drm_file *file, struct drm_pending_event *event)
{
	struct drm_device *const dev = file->minor->dev;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	list_add(&event->link, &file->event_list);
	KASSERT(file->event_space >= event->event->length);
	file->event_space -= event->event->length;
	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}

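/*
 * drm_ioctl_shim(fp, cmd, data)
 *
 *	Dispatch an ioctl to the driver's ioctl_override if it has one,
 *	otherwise to the generic drm_ioctl, converting ERESTARTSYS to
 *	ERESTART on the way out.
 */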
static int
drm_ioctl_shim(struct file *fp, unsigned long cmd, void *data)
{
	struct drm_file *file = fp->f_data;
	struct drm_driver *driver = file->minor->dev->driver;
	int error;

	if (driver->ioctl_override)
		error = driver->ioctl_override(fp, cmd, data);
	else
		error = drm_ioctl(fp, cmd, data);
	if (error == ERESTARTSYS)
		error = ERESTART;

	return error;
}

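/*
 * drm_poll(fp, events)
 *
 *	Report POLLIN/POLLRDNORM if the file has a pending event;
 *	otherwise record the caller with selrecord so it can be
 *	notified when one is queued.
 */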
static int
drm_poll(struct file *fp, int events)
{
	struct drm_file *const file = fp->f_data;
	struct drm_device *const dev = file->minor->dev;
	int revents = 0;
	unsigned long irqflags;

	if (!ISSET(events, (POLLIN | POLLRDNORM)))
		return 0;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	if (list_empty(&file->event_list))
		selrecord(curlwp, &file->event_selq);
	else
		revents |= (events & (POLLIN | POLLRDNORM));
	spin_unlock_irqrestore(&dev->event_lock, irqflags);

	return revents;
}

static void	filt_drm_detach(struct knote *);
static int	filt_drm_event(struct knote *, long);

static const struct filterops drm_filtops = {
	.f_flags = FILTEROP_ISFD,
	.f_attach = NULL,
	.f_detach = filt_drm_detach,
	.f_event = filt_drm_event,
};

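/*
 * drm_kqfilter(fp, kn)
 *
 *	Attach an EVFILT_READ knote to the file's event queue.  Write
 *	filters and anything else are rejected with EINVAL.
 */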
static int
drm_kqfilter(struct file *fp, struct knote *kn)
{
	struct drm_file *const file = fp->f_data;
	struct drm_device *const dev = file->minor->dev;
	unsigned long irqflags;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &drm_filtops;
		kn->kn_hook = file;
		spin_lock_irqsave(&dev->event_lock, irqflags);
		selrecord_knote(&file->event_selq, kn);
		spin_unlock_irqrestore(&dev->event_lock, irqflags);
		return 0;
	case EVFILT_WRITE:
	default:
		return EINVAL;
	}
}

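/*
 * filt_drm_detach(kn)
 *
 *	Detach a knote previously attached by drm_kqfilter.
 */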
static void
filt_drm_detach(struct knote *kn)
{
	struct drm_file *const file = kn->kn_hook;
	struct drm_device *const dev = file->minor->dev;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	selremove_knote(&file->event_selq, kn);
	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}

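/*
 * filt_drm_event(kn, hint)
 *
 *	Report whether an event is pending, setting kn_data to the
 *	length of the first one if so.  Takes the event lock unless
 *	called with NOTE_SUBMIT, in which case the caller already
 *	holds it.
 */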
static int
filt_drm_event(struct knote *kn, long hint)
{
	struct drm_file *const file = kn->kn_hook;
	struct drm_device *const dev = file->minor->dev;
	unsigned long irqflags;
	int ret;

	if (hint == NOTE_SUBMIT)
		KASSERT(spin_is_locked(&dev->event_lock));
	else
		spin_lock_irqsave(&dev->event_lock, irqflags);
	if (list_empty(&file->event_list)) {
		ret = 0;
	} else {
		struct drm_pending_event *const event =
		    list_first_entry(&file->event_list,
			struct drm_pending_event, link);
		kn->kn_data = event->event->length;
		ret = 1;
	}
	if (hint == NOTE_SUBMIT)
		KASSERT(spin_is_locked(&dev->event_lock));
	else
		spin_unlock_irqrestore(&dev->event_lock, irqflags);

	return ret;
}

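/*
 * drm_stat(fp, st)
 *
 *	Fake up just enough of a struct stat for fstat(2): device
 *	numbers derived from the DRM minor, the file's credentials for
 *	ownership, and a character-device mode.
 */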
static int
drm_stat(struct file *fp, struct stat *st)
{
	struct drm_file *const file = fp->f_data;
	struct drm_minor *const dminor = file->minor;
	const dev_t devno = makedev(cdevsw_lookup_major(&drm_cdevsw),
	    64*dminor->type + dminor->index);

	(void)memset(st, 0, sizeof(*st));

	st->st_dev = devno;
	st->st_ino = 0;		/* XXX (dev,ino) uniqueness bleh */
	st->st_uid = kauth_cred_geteuid(fp->f_cred);
	st->st_gid = kauth_cred_getegid(fp->f_cred);
	st->st_mode = S_IFCHR;	/* XXX what? */
	st->st_rdev = devno;
	/* XXX what else? */

	return 0;
}

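/*
 * drm_fop_mmap(fp, offp, len, prot, flagsp, advicep, uobjp, maxprotp)
 *
 *	Delegate mmap to the driver's mmap_object hook, which supplies
 *	the uvm object (and possibly an adjusted offset) for the caller
 *	to map.
 */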
static int
drm_fop_mmap(struct file *fp, off_t *offp, size_t len, int prot, int *flagsp,
    int *advicep, struct uvm_object **uobjp, int *maxprotp)
{
	struct drm_file *const file = fp->f_data;
	struct drm_device *const dev = file->minor->dev;
	int error;

	KASSERT(fp == file->filp);
	/* XXX errno Linux->NetBSD */
	error = -(*dev->driver->mmap_object)(dev, *offp, len, prot, uobjp,
	    offp, file->filp);
	*maxprotp = prot;
	*advicep = UVM_ADV_RANDOM;
	if (error == ERESTARTSYS)
		error = ERESTART;
	return error;
}

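/*
 * drm_legacy_mmap(d, offset, prot)
 *
 *	Legacy d_mmap entry point: translate a device offset into a
 *	physical address with drm_legacy_mmap_paddr, or return
 *	(paddr_t)-1 if the minor can't be acquired.
 */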
static paddr_t
drm_legacy_mmap(dev_t d, off_t offset, int prot)
{
	struct drm_minor *dminor;
	paddr_t paddr;

	dminor = drm_minor_acquire(minor(d));
	if (IS_ERR(dminor))
		return (paddr_t)-1;

	paddr = drm_legacy_mmap_paddr(dminor->dev, offset, prot);

	drm_minor_release(dminor);
	return paddr;
}