/* $NetBSD: sunxi_drm.c,v 1.25 2022/06/28 05:19:03 skrll Exp $ */

/*-
 * Copyright (c) 2019 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sunxi_drm.c,v 1.25 2022/06/28 05:19:03 skrll Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <uvm/uvm_device.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

#include <dev/fdt/fdt_port.h>
#include <dev/fdt/fdtvar.h>

#include <arm/sunxi/sunxi_drm.h>

#include <drm/drm_auth.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

#define	SUNXI_DRM_MAX_WIDTH	3840
#define	SUNXI_DRM_MAX_HEIGHT	2160

/*
 * The DRM headers break trunc_page/round_page macros with a redefinition
 * of PAGE_MASK. Use our own macros instead.
 */
#define	SUNXI_PAGE_MASK		(PAGE_SIZE - 1)
#define	SUNXI_TRUNC_PAGE(x)	((x) & ~SUNXI_PAGE_MASK)
#define	SUNXI_ROUND_PAGE(x)	(((x) + SUNXI_PAGE_MASK) & ~SUNXI_PAGE_MASK)

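/*
 * Endpoints are registered on this list by the CRTC and encoder drivers
 * as they attach, which may happen before the display-engine device
 * itself attaches; sunxi_drm_load() later matches them against the
 * pipelines listed in the device tree.
 */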
static TAILQ_HEAD(, sunxi_drm_endpoint) sunxi_drm_endpoints =
    TAILQ_HEAD_INITIALIZER(sunxi_drm_endpoints);

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "allwinner,sun8i-h3-display-engine" },
	{ .compat = "allwinner,sun8i-v3s-display-engine" },
	{ .compat = "allwinner,sun50i-a64-display-engine" },
	DEVICE_COMPAT_EOL
};

static const char * fb_compatible[] = {
	"allwinner,simple-framebuffer",
	NULL
};

static int	sunxi_drm_match(device_t, cfdata_t, void *);
static void	sunxi_drm_attach(device_t, device_t, void *);

static void	sunxi_drm_init(device_t);
static vmem_t	*sunxi_drm_alloc_cma_pool(struct drm_device *, size_t);

static uint32_t	sunxi_drm_get_vblank_counter(struct drm_device *, unsigned int);
static int	sunxi_drm_enable_vblank(struct drm_device *, unsigned int);
static void	sunxi_drm_disable_vblank(struct drm_device *, unsigned int);

static int	sunxi_drm_load(struct drm_device *, unsigned long);
static void	sunxi_drm_unload(struct drm_device *);

static void	sunxi_drm_task_work(struct work *, void *);

static struct drm_driver sunxi_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM,
	.dev_priv_size = 0,
	.load = sunxi_drm_load,
	.unload = sunxi_drm_unload,

	.gem_free_object = drm_gem_cma_free_object,
	.mmap_object = drm_gem_or_legacy_mmap_object,
	.gem_uvm_ops = &drm_gem_cma_uvm_ops,

	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_destroy = drm_gem_dumb_destroy,

	.get_vblank_counter = sunxi_drm_get_vblank_counter,
	.enable_vblank = sunxi_drm_enable_vblank,
	.disable_vblank = sunxi_drm_disable_vblank,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

CFATTACH_DECL_NEW(sunxi_drm, sizeof(struct sunxi_drm_softc),
	sunxi_drm_match, sunxi_drm_attach, NULL, NULL);

static int
sunxi_drm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_compatible_match(faa->faa_phandle, compat_data);
}

static void
sunxi_drm_attach(device_t parent, device_t self, void *aux)
{
	struct sunxi_drm_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	struct drm_driver * const driver = &sunxi_drm_driver;
	prop_dictionary_t dict = device_properties(self);
	bool is_disabled;

	aprint_naive("\n");

	if (prop_dictionary_get_bool(dict, "disabled", &is_disabled) &&
	    is_disabled) {
		aprint_normal(": Display Engine Pipeline (disabled)\n");
		return;
	}

	aprint_normal(": Display Engine Pipeline\n");

	sc->sc_dev = self;
	sc->sc_dmat = faa->faa_dmat;
	sc->sc_bst = faa->faa_bst;
	sc->sc_phandle = faa->faa_phandle;
	sc->sc_task_thread = NULL;
	SIMPLEQ_INIT(&sc->sc_tasks);
	if (workqueue_create(&sc->sc_task_wq, "sunxidrm",
	    &sunxi_drm_task_work, NULL, PRI_NONE, IPL_NONE, WQ_MPSAFE)) {
		aprint_error_dev(self, "unable to create workqueue\n");
		sc->sc_task_wq = NULL;
		return;
	}

	sc->sc_ddev = drm_dev_alloc(driver, sc->sc_dev);
	if (IS_ERR(sc->sc_ddev)) {
		aprint_error_dev(self, "couldn't allocate DRM device\n");
		return;
	}
	sc->sc_ddev->dev_private = sc;
	sc->sc_ddev->bst = sc->sc_bst;
	sc->sc_ddev->bus_dmat = sc->sc_dmat;
	sc->sc_ddev->dmat = sc->sc_ddev->bus_dmat;
	sc->sc_ddev->dmat_subregion_p = false;

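	/*
	 * Remove the bootloader's simple-framebuffer node so that no
	 * other framebuffer driver attaches to it; this driver takes
	 * over scanout from here on.
	 */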
	fdt_remove_bycompat(fb_compatible);

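	/*
	 * Defer the rest of initialization until the other devices in
	 * the display pipeline have had a chance to attach and register
	 * their endpoints.
	 */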
	config_defer(self, sunxi_drm_init);
}

static void
sunxi_drm_init(device_t dev)
{
	struct sunxi_drm_softc * const sc = device_private(dev);
	struct drm_driver * const driver = &sunxi_drm_driver;
	int error;

	/*
	 * Cause any tasks issued synchronously during attach to be
	 * processed at the end of this function.
	 */
	sc->sc_task_thread = curlwp;

	error = -drm_dev_register(sc->sc_ddev, 0);
	if (error) {
		aprint_error_dev(dev, "couldn't register DRM device: %d\n",
		    error);
		goto out;
	}
	sc->sc_dev_registered = true;

	aprint_normal_dev(dev, "initialized %s %d.%d.%d %s on minor %d\n",
	    driver->name, driver->major, driver->minor, driver->patchlevel,
	    driver->date, sc->sc_ddev->primary->index);

	/*
	 * Process asynchronous tasks queued synchronously during
	 * attach.  This will be for display detection to attach a
	 * framebuffer, so we have the opportunity for a console device
	 * to attach before autoconf has completed, in time for init(8)
	 * to find that console without panicking.
	 */
	while (!SIMPLEQ_EMPTY(&sc->sc_tasks)) {
		struct sunxi_drm_task *const task =
		    SIMPLEQ_FIRST(&sc->sc_tasks);

		SIMPLEQ_REMOVE_HEAD(&sc->sc_tasks, sdt_u.queue);
		(*task->sdt_fn)(task);
	}

out:	/* Cause any subsequent tasks to be processed by the workqueue. */
	atomic_store_relaxed(&sc->sc_task_thread, NULL);
}

static vmem_t *
sunxi_drm_alloc_cma_pool(struct drm_device *ddev, size_t cma_size)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
	bus_dma_segment_t segs[1];
	int nsegs;
	int error;

	error = bus_dmamem_alloc(sc->sc_dmat, cma_size, PAGE_SIZE, 0,
	    segs, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "couldn't allocate CMA pool\n");
		return NULL;
	}

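	/*
	 * Manage the physically contiguous DMA segment with a vmem
	 * arena; GEM CMA objects are carved out of this address range.
	 */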
	return vmem_create("sunxidrm", segs[0].ds_addr, segs[0].ds_len,
	    PAGE_SIZE, NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
}

static int
sunxi_drm_fb_create_handle(struct drm_framebuffer *fb,
    struct drm_file *file, unsigned int *handle)
{
	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(fb);

	return drm_gem_handle_create(file, &sfb->obj->base, handle);
}

static void
sunxi_drm_fb_destroy(struct drm_framebuffer *fb)
{
	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	drm_gem_object_put_unlocked(&sfb->obj->base);
	kmem_free(sfb, sizeof(*sfb));
}

static const struct drm_framebuffer_funcs sunxi_drm_framebuffer_funcs = {
	.create_handle = sunxi_drm_fb_create_handle,
	.destroy = sunxi_drm_fb_destroy,
};

static struct drm_framebuffer *
sunxi_drm_fb_create(struct drm_device *ddev, struct drm_file *file,
    const struct drm_mode_fb_cmd2 *cmd)
{
	struct sunxi_drm_framebuffer *fb;
	struct drm_gem_object *gem_obj;
	int error;

	if (cmd->flags)
		return NULL;

	gem_obj = drm_gem_object_lookup(file, cmd->handles[0]);
	if (gem_obj == NULL)
		return NULL;

	fb = kmem_zalloc(sizeof(*fb), KM_SLEEP);
	fb->obj = to_drm_gem_cma_obj(gem_obj);
	drm_helper_mode_fill_fb_struct(ddev, &fb->base, cmd);

	error = drm_framebuffer_init(ddev, &fb->base, &sunxi_drm_framebuffer_funcs);
	if (error != 0)
		goto dealloc;

	return &fb->base;

dealloc:
	drm_framebuffer_cleanup(&fb->base);
	kmem_free(fb, sizeof(*fb));
	drm_gem_object_put_unlocked(gem_obj);

	return NULL;
}

static struct drm_mode_config_funcs sunxi_drm_mode_config_funcs = {
	.fb_create = sunxi_drm_fb_create,
};

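/*
 * Find the "simple-framebuffer" node that the bootloader created under
 * /chosen and return the physical address range of its framebuffer, so
 * that memory can be reclaimed for the CMA pool.
 */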
static int
sunxi_drm_simplefb_lookup(bus_addr_t *paddr, bus_size_t *psize)
{
	static const struct device_compatible_entry simplefb_compat[] = {
		{ .compat = "simple-framebuffer" },
		DEVICE_COMPAT_EOL
	};
	int chosen, child, error;
	bus_addr_t addr_end;

	chosen = OF_finddevice("/chosen");
	if (chosen == -1)
		return ENOENT;

	for (child = OF_child(chosen); child; child = OF_peer(child)) {
		if (!fdtbus_status_okay(child))
			continue;
		if (!of_compatible_match(child, simplefb_compat))
			continue;
		error = fdtbus_get_reg(child, 0, paddr, psize);
		if (error != 0)
			return error;

		/* Reclaim entire pages used by the simplefb */
		addr_end = *paddr + *psize;
		*paddr = SUNXI_TRUNC_PAGE(*paddr);
		*psize = SUNXI_ROUND_PAGE(addr_end - *paddr);
		return 0;
	}

	return ENOENT;
}

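/*
 * fb_probe callback: size and allocate the CMA pool, create the GEM
 * object backing the console framebuffer, and attach the fbdev driver
 * on the "sunxifbbus" interface attribute.
 */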
static int
sunxi_drm_fb_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(helper->dev);
	struct drm_device *ddev = helper->dev;
	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(helper->fb);
	struct drm_framebuffer *fb = helper->fb;
	struct sunxi_drmfb_attach_args sfa;
	bus_addr_t sfb_addr;
	bus_size_t sfb_size;
	size_t cma_size;
	int error;

	const u_int width = sizes->surface_width;
	const u_int height = sizes->surface_height;
	const u_int pitch = width * (32 / 8);

	const size_t size = roundup(height * pitch, PAGE_SIZE);

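	/* Try to reclaim the bootloader's framebuffer for the CMA pool. */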
	if (sunxi_drm_simplefb_lookup(&sfb_addr, &sfb_size) != 0)
		sfb_size = 0;

	/* Reserve enough memory for a 4K plane, rounded to 1MB */
	cma_size = (SUNXI_DRM_MAX_WIDTH * SUNXI_DRM_MAX_HEIGHT * 4);
	if (sfb_size == 0) {
		/* Add memory for FB console if we cannot reclaim bootloader memory */
		cma_size += size;
	}
	cma_size = roundup(cma_size, 1024 * 1024);
	sc->sc_ddev->cma_pool = sunxi_drm_alloc_cma_pool(sc->sc_ddev, cma_size);
	if (sc->sc_ddev->cma_pool != NULL) {
		if (sfb_size != 0) {
			error = vmem_add(sc->sc_ddev->cma_pool, sfb_addr,
			    sfb_size, VM_SLEEP);
			if (error != 0)
				sfb_size = 0;
		}
		aprint_normal_dev(sc->sc_dev, "reserved %u MB DRAM for CMA",
		    (u_int)((cma_size + sfb_size) / (1024 * 1024)));
		if (sfb_size != 0)
			aprint_normal(" (%u MB reclaimed from bootloader)",
			    (u_int)(sfb_size / (1024 * 1024)));
		aprint_normal("\n");
	}

	sfb->obj = drm_gem_cma_create(ddev, size);
	if (sfb->obj == NULL) {
		DRM_ERROR("failed to allocate memory for framebuffer\n");
		return -ENOMEM;
	}

	fb->pitches[0] = pitch;
	fb->offsets[0] = 0;
	fb->width = width;
	fb->height = height;
	fb->format = drm_format_info(DRM_FORMAT_XRGB8888);
	fb->dev = ddev;

	error = drm_framebuffer_init(ddev, fb, &sunxi_drm_framebuffer_funcs);
	if (error != 0) {
		DRM_ERROR("failed to initialize framebuffer\n");
		return error;
	}

	memset(&sfa, 0, sizeof(sfa));
	sfa.sfa_drm_dev = ddev;
	sfa.sfa_fb_helper = helper;
	sfa.sfa_fb_sizes = *sizes;
	sfa.sfa_fb_bst = sc->sc_bst;
	sfa.sfa_fb_dmat = sc->sc_dmat;
	sfa.sfa_fb_linebytes = helper->fb->pitches[0];

	helper->fbdev = config_found(ddev->dev, &sfa, NULL,
	    CFARGS(.iattr = "sunxifbbus"));
	if (helper->fbdev == NULL) {
		DRM_ERROR("unable to attach framebuffer\n");
		return -ENXIO;
	}

	return 0;
}

static struct drm_fb_helper_funcs sunxi_drm_fb_helper_funcs = {
	.fb_probe = sunxi_drm_fb_probe,
};

static int
sunxi_drm_load(struct drm_device *ddev, unsigned long flags)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
	struct sunxi_drm_endpoint *sep;
	struct sunxi_drm_fbdev *fbdev;
	const u_int *data;
	int datalen, error, num_crtc;

	drm_mode_config_init(ddev);
	ddev->mode_config.min_width = 0;
	ddev->mode_config.min_height = 0;
	ddev->mode_config.max_width = SUNXI_DRM_MAX_WIDTH;
	ddev->mode_config.max_height = SUNXI_DRM_MAX_HEIGHT;
	ddev->mode_config.funcs = &sunxi_drm_mode_config_funcs;

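	/*
	 * The "allwinner,pipelines" property is a list of phandles
	 * referencing the display pipeline frontends.  Activate every
	 * endpoint registered against each of them and count the
	 * resulting CRTCs.
	 */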
	num_crtc = 0;
	data = fdtbus_get_prop(sc->sc_phandle, "allwinner,pipelines", &datalen);
	while (datalen >= 4) {
		const int crtc_phandle = fdtbus_get_phandle_from_native(be32dec(data));

		TAILQ_FOREACH(sep, &sunxi_drm_endpoints, entries)
			if (sep->phandle == crtc_phandle && sep->ddev == NULL) {
				sep->ddev = ddev;
				error = fdt_endpoint_activate_direct(sep->ep, true);
				if (error != 0) {
					aprint_error_dev(sc->sc_dev, "failed to activate endpoint: %d\n",
					    error);
				}
				if (fdt_endpoint_type(sep->ep) == EP_DRM_CRTC)
					num_crtc++;
			}

		datalen -= 4;
		data++;
	}

	if (num_crtc == 0) {
		aprint_error_dev(sc->sc_dev, "no pipelines configured\n");
		error = ENXIO;
		goto drmerr;
	}

	fbdev = kmem_zalloc(sizeof(*fbdev), KM_SLEEP);

	drm_fb_helper_prepare(ddev, &fbdev->helper, &sunxi_drm_fb_helper_funcs);

	error = drm_fb_helper_init(ddev, &fbdev->helper, num_crtc);
	if (error)
		goto allocerr;

	fbdev->helper.fb = kmem_zalloc(sizeof(struct sunxi_drm_framebuffer), KM_SLEEP);

	drm_fb_helper_single_add_all_connectors(&fbdev->helper);

	drm_helper_disable_unused_functions(ddev);

	drm_fb_helper_initial_config(&fbdev->helper, 32);

	/* XXX */
	ddev->irq_enabled = true;
	drm_vblank_init(ddev, num_crtc);

	return 0;

allocerr:
	kmem_free(fbdev, sizeof(*fbdev));
drmerr:
	drm_mode_config_cleanup(ddev);

	return error;
}

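/*
 * The vblank hooks below dispatch to per-CRTC callbacks that the CRTC
 * drivers install in sc->sc_vbl[] along with a private cookie.
 */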
static uint32_t
sunxi_drm_get_vblank_counter(struct drm_device *ddev, unsigned int crtc)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].get_vblank_counter == NULL)
		return 0;

	return sc->sc_vbl[crtc].get_vblank_counter(sc->sc_vbl[crtc].priv);
}

static int
sunxi_drm_enable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].enable_vblank == NULL)
		return 0;

	sc->sc_vbl[crtc].enable_vblank(sc->sc_vbl[crtc].priv);

	return 0;
}

static void
sunxi_drm_disable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return;

	if (sc->sc_vbl[crtc].disable_vblank == NULL)
		return;

	sc->sc_vbl[crtc].disable_vblank(sc->sc_vbl[crtc].priv);
}

static void
sunxi_drm_unload(struct drm_device *ddev)
{
	drm_mode_config_cleanup(ddev);
}

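/*
 * Called by CRTC and encoder drivers at attach time to register an
 * endpoint for later matching in sunxi_drm_load().
 */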
int
sunxi_drm_register_endpoint(int phandle, struct fdt_endpoint *ep)
{
	struct sunxi_drm_endpoint *sep;

	sep = kmem_zalloc(sizeof(*sep), KM_SLEEP);
	sep->phandle = phandle;
	sep->ep = ep;
	sep->ddev = NULL;
	TAILQ_INSERT_TAIL(&sunxi_drm_endpoints, sep, entries);

	return 0;
}

struct drm_device *
sunxi_drm_endpoint_device(struct fdt_endpoint *ep)
{
	struct sunxi_drm_endpoint *sep;

	TAILQ_FOREACH(sep, &sunxi_drm_endpoints, entries)
		if (sep->ep == ep)
			return sep->ddev;

	return NULL;
}

static void
sunxi_drm_task_work(struct work *work, void *cookie)
{
	struct sunxi_drm_task *task = container_of(work, struct sunxi_drm_task,
	    sdt_u.work);

	(*task->sdt_fn)(task);
}

void
sunxi_task_init(struct sunxi_drm_task *task,
    void (*fn)(struct sunxi_drm_task *))
{

	task->sdt_fn = fn;
}

void
sunxi_task_schedule(device_t self, struct sunxi_drm_task *task)
{
	struct sunxi_drm_softc *sc = device_private(self);

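	/*
	 * If we are still the attach-time task thread, queue the task
	 * for sunxi_drm_init() to run synchronously; otherwise hand it
	 * off to the workqueue.
	 */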
	if (atomic_load_relaxed(&sc->sc_task_thread) == curlwp)
		SIMPLEQ_INSERT_TAIL(&sc->sc_tasks, task, sdt_u.queue);
	else
		workqueue_enqueue(sc->sc_task_wq, &task->sdt_u.work, NULL);
}