/* $NetBSD: sunxi_drm.c,v 1.19 2021/12/19 11:25:09 riastradh Exp $ */

/*-
 * Copyright (c) 2019 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sunxi_drm.c,v 1.19 2021/12/19 11:25:09 riastradh Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <uvm/uvm_device.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

#include <dev/fdt/fdt_port.h>
#include <dev/fdt/fdtvar.h>

#include <arm/sunxi/sunxi_drm.h>

#include <drm/drm_auth.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

#define SUNXI_DRM_MAX_WIDTH	3840
#define SUNXI_DRM_MAX_HEIGHT	2160

/*
 * The DRM headers break trunc_page/round_page macros with a redefinition
 * of PAGE_MASK. Use our own macros instead.
 */
#define SUNXI_PAGE_MASK		(PAGE_SIZE - 1)
#define SUNXI_TRUNC_PAGE(x)	((x) & ~SUNXI_PAGE_MASK)
#define SUNXI_ROUND_PAGE(x)	(((x) + SUNXI_PAGE_MASK) & ~SUNXI_PAGE_MASK)

static TAILQ_HEAD(, sunxi_drm_endpoint) sunxi_drm_endpoints =
    TAILQ_HEAD_INITIALIZER(sunxi_drm_endpoints);

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "allwinner,sun8i-h3-display-engine" },
	{ .compat = "allwinner,sun50i-a64-display-engine" },
	DEVICE_COMPAT_EOL
};

static const char * fb_compatible[] = {
	"allwinner,simple-framebuffer",
	NULL
};

static int	sunxi_drm_match(device_t, cfdata_t, void *);
static void	sunxi_drm_attach(device_t, device_t, void *);

static void	sunxi_drm_init(device_t);
static vmem_t	*sunxi_drm_alloc_cma_pool(struct drm_device *, size_t);

static int	sunxi_drm_set_busid(struct drm_device *, struct drm_master *);

static uint32_t	sunxi_drm_get_vblank_counter(struct drm_device *, unsigned int);
static int	sunxi_drm_enable_vblank(struct drm_device *, unsigned int);
static void	sunxi_drm_disable_vblank(struct drm_device *, unsigned int);

static int	sunxi_drm_load(struct drm_device *, unsigned long);
static void	sunxi_drm_unload(struct drm_device *);

static struct drm_driver sunxi_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM,
	.dev_priv_size = 0,
	.load = sunxi_drm_load,
	.unload = sunxi_drm_unload,

	.gem_free_object = drm_gem_cma_free_object,
	.mmap_object = drm_gem_or_legacy_mmap_object,
	.gem_uvm_ops = &drm_gem_cma_uvm_ops,

	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_destroy = drm_gem_dumb_destroy,

	.get_vblank_counter = sunxi_drm_get_vblank_counter,
	.enable_vblank = sunxi_drm_enable_vblank,
	.disable_vblank = sunxi_drm_disable_vblank,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,

	.set_busid = sunxi_drm_set_busid,
};

CFATTACH_DECL_NEW(sunxi_drm, sizeof(struct sunxi_drm_softc),
	sunxi_drm_match, sunxi_drm_attach, NULL, NULL);

static int
sunxi_drm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_compatible_match(faa->faa_phandle, compat_data);
}

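/*
 * Attach the Display Engine Pipeline driver. The heavy lifting is
 * deferred to sunxi_drm_init() via config_defer() so that the CRTC and
 * encoder drivers get a chance to attach and register their endpoints
 * first. Any "allwinner,simple-framebuffer" node installed by the
 * bootloader is removed here so this driver can take over the display.
 */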
static void
sunxi_drm_attach(device_t parent, device_t self, void *aux)
{
	struct sunxi_drm_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	struct drm_driver * const driver = &sunxi_drm_driver;
	prop_dictionary_t dict = device_properties(self);
	bool is_disabled;

	sc->sc_dev = self;
	sc->sc_dmat = faa->faa_dmat;
	sc->sc_bst = faa->faa_bst;
	sc->sc_phandle = faa->faa_phandle;

	aprint_naive("\n");

	if (prop_dictionary_get_bool(dict, "disabled", &is_disabled) && is_disabled) {
		aprint_normal(": Display Engine Pipeline (disabled)\n");
		return;
	}

	aprint_normal(": Display Engine Pipeline\n");

	sc->sc_ddev = drm_dev_alloc(driver, sc->sc_dev);
	if (IS_ERR(sc->sc_ddev)) {
		aprint_error_dev(self, "couldn't allocate DRM device\n");
		return;
	}
	sc->sc_ddev->dev_private = sc;
	sc->sc_ddev->bst = sc->sc_bst;
	sc->sc_ddev->bus_dmat = sc->sc_dmat;
	sc->sc_ddev->dmat = sc->sc_ddev->bus_dmat;
	sc->sc_ddev->dmat_subregion_p = false;

	fdt_remove_bycompat(fb_compatible);

	config_defer(self, sunxi_drm_init);
}

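/*
 * Deferred initialization: register the DRM device with the core, which
 * invokes the driver's load callback (sunxi_drm_load()) to build the
 * mode setting pipeline.
 */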
static void
sunxi_drm_init(device_t dev)
{
	struct sunxi_drm_softc * const sc = device_private(dev);
	struct drm_driver * const driver = &sunxi_drm_driver;
	int error;

	error = -drm_dev_register(sc->sc_ddev, 0);
	if (error) {
		aprint_error_dev(dev, "couldn't register DRM device: %d\n",
		    error);
		return;
	}

	aprint_normal_dev(dev, "initialized %s %d.%d.%d %s on minor %d\n",
	    driver->name, driver->major, driver->minor, driver->patchlevel,
	    driver->date, sc->sc_ddev->primary->index);
}

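/*
 * Allocate a physically contiguous DMA region and wrap it in a vmem
 * arena; GEM CMA objects for scanout buffers are intended to be carved
 * out of this pool.
 */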
static vmem_t *
sunxi_drm_alloc_cma_pool(struct drm_device *ddev, size_t cma_size)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
	bus_dma_segment_t segs[1];
	int nsegs;
	int error;

	error = bus_dmamem_alloc(sc->sc_dmat, cma_size, PAGE_SIZE, 0,
	    segs, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "couldn't allocate CMA pool\n");
		return NULL;
	}

	return vmem_create("sunxidrm", segs[0].ds_addr, segs[0].ds_len,
	    PAGE_SIZE, NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
}

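/*
 * Provide a unique bus ID of the form "platform:sunxi:<unit>" for the
 * DRM master.
 */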
static int
sunxi_drm_set_busid(struct drm_device *ddev, struct drm_master *master)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
	char id[32];

	snprintf(id, sizeof(id), "platform:sunxi:%u", device_unit(sc->sc_dev));

	master->unique = kzalloc(strlen(id) + 1, GFP_KERNEL);
	if (master->unique == NULL)
		return -ENOMEM;
	strcpy(master->unique, id);
	master->unique_len = strlen(master->unique);

	return 0;
}

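/*
 * Framebuffer object handling: userland handles are created from the
 * underlying GEM CMA object, and destroying a framebuffer drops that
 * reference and frees the wrapper.
 */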
static int
sunxi_drm_fb_create_handle(struct drm_framebuffer *fb,
    struct drm_file *file, unsigned int *handle)
{
	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(fb);

	return drm_gem_handle_create(file, &sfb->obj->base, handle);
}

static void
sunxi_drm_fb_destroy(struct drm_framebuffer *fb)
{
	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	drm_gem_object_put_unlocked(&sfb->obj->base);
	kmem_free(sfb, sizeof(*sfb));
}

static const struct drm_framebuffer_funcs sunxi_drm_framebuffer_funcs = {
	.create_handle = sunxi_drm_fb_create_handle,
	.destroy = sunxi_drm_fb_destroy,
};

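/*
 * Create a framebuffer from a userland mode_fb_cmd2 request by looking
 * up the GEM object behind the first handle and copying the per-plane
 * pitches and offsets.
 */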
static struct drm_framebuffer *
sunxi_drm_fb_create(struct drm_device *ddev, struct drm_file *file,
    const struct drm_mode_fb_cmd2 *cmd)
{
	struct sunxi_drm_framebuffer *fb;
	struct drm_gem_object *gem_obj;
	int error;

	if (cmd->flags)
		return NULL;

	gem_obj = drm_gem_object_lookup(file, cmd->handles[0]);
	if (gem_obj == NULL)
		return NULL;

	fb = kmem_zalloc(sizeof(*fb), KM_SLEEP);
	fb->obj = to_drm_gem_cma_obj(gem_obj);
	fb->base.pitches[0] = cmd->pitches[0];
	fb->base.pitches[1] = cmd->pitches[1];
	fb->base.pitches[2] = cmd->pitches[2];
	fb->base.offsets[0] = cmd->offsets[0];
	fb->base.offsets[1] = cmd->offsets[1];
	fb->base.offsets[2] = cmd->offsets[2];
	fb->base.width = cmd->width;
	fb->base.height = cmd->height;
	fb->base.format = drm_format_info(cmd->pixel_format);

	error = drm_framebuffer_init(ddev, &fb->base, &sunxi_drm_framebuffer_funcs);
	if (error != 0)
		goto dealloc;

	return &fb->base;

dealloc:
	drm_framebuffer_cleanup(&fb->base);
	kmem_free(fb, sizeof(*fb));
	drm_gem_object_put_unlocked(gem_obj);

	return NULL;
}

static struct drm_mode_config_funcs sunxi_drm_mode_config_funcs = {
	.fb_create = sunxi_drm_fb_create,
};

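/*
 * Look under /chosen for a "simple-framebuffer" node set up by the
 * bootloader and return its register range, expanded to whole pages,
 * so that the memory can be reclaimed into the CMA pool.
 */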
static int
sunxi_drm_simplefb_lookup(bus_addr_t *paddr, bus_size_t *psize)
{
	static const struct device_compatible_entry simplefb_compat[] = {
		{ .compat = "simple-framebuffer" },
		DEVICE_COMPAT_EOL
	};
	int chosen, child, error;
	bus_addr_t addr_end;

	chosen = OF_finddevice("/chosen");
	if (chosen == -1)
		return ENOENT;

	for (child = OF_child(chosen); child; child = OF_peer(child)) {
		if (!fdtbus_status_okay(child))
			continue;
		if (!of_compatible_match(child, simplefb_compat))
			continue;
		error = fdtbus_get_reg(child, 0, paddr, psize);
		if (error != 0)
			return error;

		/* Reclaim entire pages used by the simplefb */
		addr_end = *paddr + *psize;
		*paddr = SUNXI_TRUNC_PAGE(*paddr);
		*psize = SUNXI_ROUND_PAGE(addr_end - *paddr);
		return 0;
	}

	return ENOENT;
}

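/*
 * fbdev helper probe: size and create the CMA pool (reclaiming the
 * bootloader framebuffer found above, if any), allocate a GEM CMA
 * object for the console framebuffer, and attach the fbdev child
 * device on the "sunxifbbus" interface attribute.
 */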
static int
sunxi_drm_fb_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(helper->dev);
	struct drm_device *ddev = helper->dev;
	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(helper->fb);
	struct drm_framebuffer *fb = helper->fb;
	struct sunxi_drmfb_attach_args sfa;
	bus_addr_t sfb_addr;
	bus_size_t sfb_size;
	size_t cma_size;
	int error;

	const u_int width = sizes->surface_width;
	const u_int height = sizes->surface_height;
	const u_int pitch = width * (32 / 8);

	const size_t size = roundup(height * pitch, PAGE_SIZE);

	if (sunxi_drm_simplefb_lookup(&sfb_addr, &sfb_size) != 0)
		sfb_size = 0;

	/* Reserve enough memory for a 4K plane, rounded to 1MB */
	cma_size = (SUNXI_DRM_MAX_WIDTH * SUNXI_DRM_MAX_HEIGHT * 4);
	if (sfb_size == 0) {
		/* Add memory for FB console if we cannot reclaim bootloader memory */
		cma_size += size;
	}
	cma_size = roundup(cma_size, 1024 * 1024);
	sc->sc_ddev->cma_pool = sunxi_drm_alloc_cma_pool(sc->sc_ddev, cma_size);
	if (sc->sc_ddev->cma_pool != NULL) {
		if (sfb_size != 0) {
			error = vmem_add(sc->sc_ddev->cma_pool, sfb_addr,
			    sfb_size, VM_SLEEP);
			if (error != 0)
				sfb_size = 0;
		}
		aprint_normal_dev(sc->sc_dev, "reserved %u MB DRAM for CMA",
		    (u_int)((cma_size + sfb_size) / (1024 * 1024)));
		if (sfb_size != 0)
			aprint_normal(" (%u MB reclaimed from bootloader)",
			    (u_int)(sfb_size / (1024 * 1024)));
		aprint_normal("\n");
	}

	sfb->obj = drm_gem_cma_create(ddev, size);
	if (sfb->obj == NULL) {
		DRM_ERROR("failed to allocate memory for framebuffer\n");
		return -ENOMEM;
	}

	fb->pitches[0] = pitch;
	fb->offsets[0] = 0;
	fb->width = width;
	fb->height = height;
	fb->format = drm_format_info(DRM_FORMAT_XRGB8888);
	fb->dev = ddev;

	error = drm_framebuffer_init(ddev, fb, &sunxi_drm_framebuffer_funcs);
	if (error != 0) {
		DRM_ERROR("failed to initialize framebuffer\n");
		return error;
	}

	memset(&sfa, 0, sizeof(sfa));
	sfa.sfa_drm_dev = ddev;
	sfa.sfa_fb_helper = helper;
	sfa.sfa_fb_sizes = *sizes;
	sfa.sfa_fb_bst = sc->sc_bst;
	sfa.sfa_fb_dmat = sc->sc_dmat;
	sfa.sfa_fb_linebytes = helper->fb->pitches[0];

	helper->fbdev = config_found(ddev->dev, &sfa, NULL,
	    CFARGS(.iattr = "sunxifbbus"));
	if (helper->fbdev == NULL) {
		DRM_ERROR("unable to attach framebuffer\n");
		return -ENXIO;
	}

	return 0;
}

static struct drm_fb_helper_funcs sunxi_drm_fb_helper_funcs = {
	.fb_probe = sunxi_drm_fb_probe,
};

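/*
 * Driver load callback: set up the mode configuration limits, walk the
 * "allwinner,pipelines" property to bind previously registered endpoints
 * to this DRM device, then bring up the fbdev helper and vblank support.
 *
 * The property contains one phandle per display pipeline. In the device
 * trees for the supported SoCs the node is expected to look roughly like
 * this (illustrative example only; node and label names may differ):
 *
 *	display-engine {
 *		compatible = "allwinner,sun8i-h3-display-engine";
 *		allwinner,pipelines = <&mixer0>;
 *	};
 */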
static int
sunxi_drm_load(struct drm_device *ddev, unsigned long flags)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
	struct sunxi_drm_endpoint *sep;
	struct sunxi_drm_fbdev *fbdev;
	const u_int *data;
	int datalen, error, num_crtc;

	drm_mode_config_init(ddev);
	ddev->mode_config.min_width = 0;
	ddev->mode_config.min_height = 0;
	ddev->mode_config.max_width = SUNXI_DRM_MAX_WIDTH;
	ddev->mode_config.max_height = SUNXI_DRM_MAX_HEIGHT;
	ddev->mode_config.funcs = &sunxi_drm_mode_config_funcs;

	num_crtc = 0;
	data = fdtbus_get_prop(sc->sc_phandle, "allwinner,pipelines", &datalen);
	while (datalen >= 4) {
		const int crtc_phandle = fdtbus_get_phandle_from_native(be32dec(data));

		TAILQ_FOREACH(sep, &sunxi_drm_endpoints, entries)
			if (sep->phandle == crtc_phandle && sep->ddev == NULL) {
				sep->ddev = ddev;
				error = fdt_endpoint_activate_direct(sep->ep, true);
				if (error != 0) {
					aprint_error_dev(sc->sc_dev, "failed to activate endpoint: %d\n",
					    error);
				}
				if (fdt_endpoint_type(sep->ep) == EP_DRM_CRTC)
					num_crtc++;
			}

		datalen -= 4;
		data++;
	}

	if (num_crtc == 0) {
		aprint_error_dev(sc->sc_dev, "no pipelines configured\n");
		error = ENXIO;
		goto drmerr;
	}

	fbdev = kmem_zalloc(sizeof(*fbdev), KM_SLEEP);

	drm_fb_helper_prepare(ddev, &fbdev->helper, &sunxi_drm_fb_helper_funcs);

	error = drm_fb_helper_init(ddev, &fbdev->helper, num_crtc);
	if (error)
		goto allocerr;

	fbdev->helper.fb = kmem_zalloc(sizeof(struct sunxi_drm_framebuffer), KM_SLEEP);

	drm_fb_helper_single_add_all_connectors(&fbdev->helper);

	drm_helper_disable_unused_functions(ddev);

	drm_fb_helper_initial_config(&fbdev->helper, 32);

	/* XXX */
	ddev->irq_enabled = true;
	drm_vblank_init(ddev, num_crtc);

	return 0;

allocerr:
	kmem_free(fbdev, sizeof(*fbdev));
drmerr:
	drm_mode_config_cleanup(ddev);

	return error;
}

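/*
 * Vblank callbacks. The work is delegated to the per-CRTC hooks
 * registered in sc_vbl[], typically by the CRTC driver; if no hook is
 * present for a CRTC the calls are silently ignored.
 */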
static uint32_t
sunxi_drm_get_vblank_counter(struct drm_device *ddev, unsigned int crtc)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].get_vblank_counter == NULL)
		return 0;

	return sc->sc_vbl[crtc].get_vblank_counter(sc->sc_vbl[crtc].priv);
}

static int
sunxi_drm_enable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].enable_vblank == NULL)
		return 0;

	sc->sc_vbl[crtc].enable_vblank(sc->sc_vbl[crtc].priv);

	return 0;
}

static void
sunxi_drm_disable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return;

	if (sc->sc_vbl[crtc].disable_vblank == NULL)
		return;

	sc->sc_vbl[crtc].disable_vblank(sc->sc_vbl[crtc].priv);
}

static void
sunxi_drm_unload(struct drm_device *ddev)
{
	drm_mode_config_cleanup(ddev);
}

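/*
 * Called by the other components of the display pipeline (CRTCs,
 * encoders) to register an endpoint against the phandle of its
 * pipeline, normally before the DRM device loads; sunxi_drm_load()
 * later matches these registrations against "allwinner,pipelines".
 */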
int
sunxi_drm_register_endpoint(int phandle, struct fdt_endpoint *ep)
{
	struct sunxi_drm_endpoint *sep;

	sep = kmem_zalloc(sizeof(*sep), KM_SLEEP);
	sep->phandle = phandle;
	sep->ep = ep;
	sep->ddev = NULL;
	TAILQ_INSERT_TAIL(&sunxi_drm_endpoints, sep, entries);

	return 0;
}

struct drm_device *
sunxi_drm_endpoint_device(struct fdt_endpoint *ep)
{
	struct sunxi_drm_endpoint *sep;

	TAILQ_FOREACH(sep, &sunxi_drm_endpoints, entries)
		if (sep->ep == ep)
			return sep->ddev;

	return NULL;
}