/* $NetBSD: rk_drm.c,v 1.11 2021/12/19 11:25:48 riastradh Exp $ */

/*-
 * Copyright (c) 2019 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rk_drm.c,v 1.11 2021/12/19 11:25:48 riastradh Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <uvm/uvm_device.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

#include <dev/fdt/fdt_port.h>
#include <dev/fdt/fdtvar.h>

#include <arm/rockchip/rk_drm.h>

#include <drm/drm_auth.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

#define	RK_DRM_MAX_WIDTH	3840
#define	RK_DRM_MAX_HEIGHT	2160

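/*
 * Display ports registered via rk_drm_register_port(). rk_drm_load() claims
 * entries whose phandle appears in the "ports" property and treats each
 * claimed port as a CRTC.
 */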
static TAILQ_HEAD(, rk_drm_ports) rk_drm_ports =
    TAILQ_HEAD_INITIALIZER(rk_drm_ports);

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "rockchip,display-subsystem" },
	DEVICE_COMPAT_EOL
};

static const char * fb_compatible[] = {
	"simple-framebuffer",
	NULL
};

static int	rk_drm_match(device_t, cfdata_t, void *);
static void	rk_drm_attach(device_t, device_t, void *);

static void	rk_drm_init(device_t);
static vmem_t	*rk_drm_alloc_cma_pool(struct drm_device *, size_t);

static uint32_t	rk_drm_get_vblank_counter(struct drm_device *, unsigned int);
static int	rk_drm_enable_vblank(struct drm_device *, unsigned int);
static void	rk_drm_disable_vblank(struct drm_device *, unsigned int);

static int	rk_drm_load(struct drm_device *, unsigned long);
static void	rk_drm_unload(struct drm_device *);

static struct drm_driver rk_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM,
	.dev_priv_size = 0,
	.load = rk_drm_load,
	.unload = rk_drm_unload,

	.gem_free_object = drm_gem_cma_free_object,
	.mmap_object = drm_gem_or_legacy_mmap_object,
	.gem_uvm_ops = &drm_gem_cma_uvm_ops,

	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_destroy = drm_gem_dumb_destroy,

	.get_vblank_counter = rk_drm_get_vblank_counter,
	.enable_vblank = rk_drm_enable_vblank,
	.disable_vblank = rk_drm_disable_vblank,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

CFATTACH_DECL_NEW(rk_drm, sizeof(struct rk_drm_softc),
	rk_drm_match, rk_drm_attach, NULL, NULL);

static int
rk_drm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_compatible_match(faa->faa_phandle, compat_data);
}

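/*
 * Attach the "rockchip,display-subsystem" node: allocate the DRM device,
 * remove any "simple-framebuffer" node left by the bootloader, and defer
 * registration to rk_drm_init() until all devices have attached.
 */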
static void
rk_drm_attach(device_t parent, device_t self, void *aux)
{
	struct rk_drm_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	struct drm_driver * const driver = &rk_drm_driver;
	prop_dictionary_t dict = device_properties(self);
	bool is_disabled;

	sc->sc_dev = self;
	sc->sc_dmat = faa->faa_dmat;
	sc->sc_bst = faa->faa_bst;
	sc->sc_phandle = faa->faa_phandle;

	aprint_naive("\n");

	if (prop_dictionary_get_bool(dict, "disabled", &is_disabled) && is_disabled) {
		aprint_normal(": (disabled)\n");
		return;
	}

	aprint_normal("\n");

	sc->sc_ddev = drm_dev_alloc(driver, sc->sc_dev);
	if (IS_ERR(sc->sc_ddev)) {
		aprint_error_dev(self, "couldn't allocate DRM device\n");
		return;
	}
	sc->sc_ddev->dev_private = sc;
	sc->sc_ddev->bst = sc->sc_bst;
	sc->sc_ddev->bus_dmat = sc->sc_dmat;
	sc->sc_ddev->dmat = sc->sc_ddev->bus_dmat;
	sc->sc_ddev->dmat_subregion_p = false;

	fdt_remove_bycompat(fb_compatible);

	config_defer(self, rk_drm_init);
}

static void
rk_drm_init(device_t dev)
{
	struct rk_drm_softc * const sc = device_private(dev);
	struct drm_driver * const driver = &rk_drm_driver;
	int error;

	error = -drm_dev_register(sc->sc_ddev, 0);
	if (error) {
		aprint_error_dev(dev, "couldn't register DRM device: %d\n",
		    error);
		return;
	}

	aprint_normal_dev(dev, "initialized %s %d.%d.%d %s on minor %d\n",
	    driver->name, driver->major, driver->minor, driver->patchlevel,
	    driver->date, sc->sc_ddev->primary->index);
}

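/*
 * Allocate a physically contiguous DMA segment of cma_size bytes and wrap
 * it in a vmem arena; the arena backs ddev->cma_pool, from which the CMA
 * GEM objects are allocated.
 */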
static vmem_t *
rk_drm_alloc_cma_pool(struct drm_device *ddev, size_t cma_size)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);
	bus_dma_segment_t segs[1];
	int nsegs;
	int error;

	error = bus_dmamem_alloc(sc->sc_dmat, cma_size, PAGE_SIZE, 0,
	    segs, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "couldn't allocate CMA pool\n");
		return NULL;
	}

	return vmem_create("rkdrm", segs[0].ds_addr, segs[0].ds_len,
	    PAGE_SIZE, NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
}

static int
rk_drm_fb_create_handle(struct drm_framebuffer *fb,
    struct drm_file *file, unsigned int *handle)
{
	struct rk_drm_framebuffer *sfb = to_rk_drm_framebuffer(fb);

	return drm_gem_handle_create(file, &sfb->obj->base, handle);
}

static void
rk_drm_fb_destroy(struct drm_framebuffer *fb)
{
	struct rk_drm_framebuffer *sfb = to_rk_drm_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	drm_gem_object_put_unlocked(&sfb->obj->base);
	kmem_free(sfb, sizeof(*sfb));
}

static const struct drm_framebuffer_funcs rk_drm_framebuffer_funcs = {
	.create_handle = rk_drm_fb_create_handle,
	.destroy = rk_drm_fb_destroy,
};

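/*
 * .fb_create mode-config hook: look up the GEM object named by
 * cmd->handles[0] and wrap it in a rk_drm_framebuffer for
 * userspace-supplied framebuffers.
 */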
static struct drm_framebuffer *
rk_drm_fb_create(struct drm_device *ddev, struct drm_file *file,
    const struct drm_mode_fb_cmd2 *cmd)
{
	struct rk_drm_framebuffer *fb;
	struct drm_gem_object *gem_obj;
	int error;

	if (cmd->flags)
		return NULL;

	gem_obj = drm_gem_object_lookup(file, cmd->handles[0]);
	if (gem_obj == NULL)
		return NULL;

	fb = kmem_zalloc(sizeof(*fb), KM_SLEEP);
	fb->obj = to_drm_gem_cma_obj(gem_obj);
	fb->base.pitches[0] = cmd->pitches[0];
	fb->base.pitches[1] = cmd->pitches[1];
	fb->base.pitches[2] = cmd->pitches[2];
	fb->base.offsets[0] = cmd->offsets[0];
	fb->base.offsets[1] = cmd->offsets[1];
	fb->base.offsets[2] = cmd->offsets[2];
	fb->base.width = cmd->width;
	fb->base.height = cmd->height;
	fb->base.format = drm_format_info(cmd->pixel_format);

	error = drm_framebuffer_init(ddev, &fb->base, &rk_drm_framebuffer_funcs);
	if (error != 0)
		goto dealloc;

	return &fb->base;

dealloc:
	drm_framebuffer_cleanup(&fb->base);
	kmem_free(fb, sizeof(*fb));
	drm_gem_object_put_unlocked(gem_obj);

	return NULL;
}

static struct drm_mode_config_funcs rk_drm_mode_config_funcs = {
	.fb_create = rk_drm_fb_create,
};

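/*
 * fbdev helper probe: size a CMA pool for the console framebuffer plus one
 * full-size (RK_DRM_MAX_WIDTH x RK_DRM_MAX_HEIGHT, 32bpp) plane, allocate
 * the console GEM object, and attach the "rkfbbus" framebuffer child.
 */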
static int
rk_drm_fb_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes)
{
	struct rk_drm_softc * const sc = rk_drm_private(helper->dev);
	struct drm_device *ddev = helper->dev;
	struct rk_drm_framebuffer *sfb = to_rk_drm_framebuffer(helper->fb);
	struct drm_framebuffer *fb = helper->fb;
	struct rk_drmfb_attach_args sfa;
	size_t cma_size;
	int error;

	const u_int width = sizes->surface_width;
	const u_int height = sizes->surface_height;
	const u_int pitch = width * (32 / 8);

	const size_t size = roundup(height * pitch, PAGE_SIZE);

	/* Reserve enough memory for the FB console plus a 4K plane, rounded to 1MB */
	cma_size = size;
	cma_size += (RK_DRM_MAX_WIDTH * RK_DRM_MAX_HEIGHT * 4);
	cma_size = roundup(cma_size, 1024 * 1024);
	sc->sc_ddev->cma_pool = rk_drm_alloc_cma_pool(sc->sc_ddev, cma_size);
	if (sc->sc_ddev->cma_pool != NULL)
		aprint_normal_dev(sc->sc_dev, "reserved %u MB DRAM for CMA\n",
		    (u_int)(cma_size / (1024 * 1024)));

	sfb->obj = drm_gem_cma_create(ddev, size);
	if (sfb->obj == NULL) {
		DRM_ERROR("failed to allocate memory for framebuffer\n");
		return -ENOMEM;
	}

	fb->pitches[0] = pitch;
	fb->offsets[0] = 0;
	fb->width = width;
	fb->height = height;
#ifdef __ARM_BIG_ENDIAN
	fb->format = drm_format_info(DRM_FORMAT_BGRX8888);
#else
	fb->format = drm_format_info(DRM_FORMAT_XRGB8888);
#endif

	error = drm_framebuffer_init(ddev, fb, &rk_drm_framebuffer_funcs);
	if (error != 0) {
		DRM_ERROR("failed to initialize framebuffer\n");
		return error;
	}

	memset(&sfa, 0, sizeof(sfa));
	sfa.sfa_drm_dev = ddev;
	sfa.sfa_fb_helper = helper;
	sfa.sfa_fb_sizes = *sizes;
	sfa.sfa_fb_bst = sc->sc_bst;
	sfa.sfa_fb_dmat = sc->sc_dmat;
	sfa.sfa_fb_linebytes = helper->fb->pitches[0];

	helper->fbdev = config_found(ddev->dev, &sfa, NULL,
	    CFARGS(.iattr = "rkfbbus"));
	if (helper->fbdev == NULL) {
		DRM_ERROR("unable to attach framebuffer\n");
		return -ENXIO;
	}

	return 0;
}

static struct drm_fb_helper_funcs rk_drm_fb_helper_funcs = {
	.fb_probe = rk_drm_fb_probe,
};

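/*
 * drm_driver load hook: set up the mode-config limits, activate the
 * endpoints of every registered port listed in the "ports" property (each
 * claimed port counts as one CRTC), then bring up fbdev emulation and
 * vblank support.
 */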
static int
rk_drm_load(struct drm_device *ddev, unsigned long flags)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);
	struct rk_drm_ports *sport;
	struct rk_drm_fbdev *fbdev;
	struct fdt_endpoint *ep;
	const u_int *data;
	int datalen, error, num_crtc, ep_index;

	drm_mode_config_init(ddev);
	ddev->mode_config.min_width = 0;
	ddev->mode_config.min_height = 0;
	ddev->mode_config.max_width = RK_DRM_MAX_WIDTH;
	ddev->mode_config.max_height = RK_DRM_MAX_HEIGHT;
	ddev->mode_config.funcs = &rk_drm_mode_config_funcs;

	num_crtc = 0;
	data = fdtbus_get_prop(sc->sc_phandle, "ports", &datalen);
	while (datalen >= 4) {
		const int crtc_phandle = fdtbus_get_phandle_from_native(be32dec(data));

		TAILQ_FOREACH(sport, &rk_drm_ports, entries)
			if (sport->phandle == crtc_phandle && sport->ddev == NULL) {
				sport->ddev = ddev;
				for (ep_index = 0; (ep = fdt_endpoint_get_from_index(sport->port, 0, ep_index)) != NULL; ep_index++) {
					error = fdt_endpoint_activate_direct(ep, true);
					if (error != 0)
						aprint_debug_dev(sc->sc_dev,
						    "failed to activate endpoint %d: %d\n",
						    ep_index, error);
				}
				num_crtc++;
			}

		datalen -= 4;
		data++;
	}

	if (num_crtc == 0) {
		aprint_error_dev(sc->sc_dev, "no display interface ports configured\n");
		error = ENXIO;
		goto drmerr;
	}

	fbdev = kmem_zalloc(sizeof(*fbdev), KM_SLEEP);

	drm_fb_helper_prepare(ddev, &fbdev->helper, &rk_drm_fb_helper_funcs);

	error = drm_fb_helper_init(ddev, &fbdev->helper, num_crtc);
	if (error)
		goto allocerr;

	fbdev->helper.fb = kmem_zalloc(sizeof(struct rk_drm_framebuffer), KM_SLEEP);

	drm_fb_helper_single_add_all_connectors(&fbdev->helper);

	drm_helper_disable_unused_functions(ddev);

	drm_fb_helper_initial_config(&fbdev->helper, 32);

	/* XXX */
	ddev->irq_enabled = true;
	drm_vblank_init(ddev, num_crtc);

	return 0;

allocerr:
	kmem_free(fbdev, sizeof(*fbdev));
drmerr:
	drm_mode_config_cleanup(ddev);

	return error;
}

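/*
 * Vblank hooks: bounds-check the CRTC index and forward to the per-CRTC
 * callbacks registered in sc->sc_vbl, if any.
 */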
static uint32_t
rk_drm_get_vblank_counter(struct drm_device *ddev, unsigned int crtc)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].get_vblank_counter == NULL)
		return 0;

	return sc->sc_vbl[crtc].get_vblank_counter(sc->sc_vbl[crtc].priv);
}

static int
rk_drm_enable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].enable_vblank == NULL)
		return 0;

	sc->sc_vbl[crtc].enable_vblank(sc->sc_vbl[crtc].priv);

	return 0;
}

static void
rk_drm_disable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return;

	if (sc->sc_vbl[crtc].disable_vblank == NULL)
		return;

	sc->sc_vbl[crtc].disable_vblank(sc->sc_vbl[crtc].priv);
}

static void
rk_drm_unload(struct drm_device *ddev)
{
	drm_mode_config_cleanup(ddev);
}

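/*
 * External interface for port drivers: rk_drm_register_port() records a
 * port on the rk_drm_ports list before the DRM device loads, and
 * rk_drm_port_device() returns the DRM device that claimed the port in
 * rk_drm_load(), or NULL if it has not been claimed.
 */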
int
rk_drm_register_port(int phandle, struct fdt_device_ports *port)
{
	struct rk_drm_ports *sport;

	sport = kmem_zalloc(sizeof(*sport), KM_SLEEP);
	sport->phandle = phandle;
	sport->port = port;
	sport->ddev = NULL;
	TAILQ_INSERT_TAIL(&rk_drm_ports, sport, entries);

	return 0;
}

struct drm_device *
rk_drm_port_device(struct fdt_device_ports *port)
{
	struct rk_drm_ports *sport;

	TAILQ_FOREACH(sport, &rk_drm_ports, entries)
		if (sport->port == port)
			return sport->ddev;

	return NULL;
}