/* $NetBSD: rk_drm.c,v 1.1 2019/11/09 23:30:14 jmcneill Exp $ */

/*-
 * Copyright (c) 2019 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rk_drm.c,v 1.1 2019/11/09 23:30:14 jmcneill Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>
#include <uvm/uvm_device.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>

#include <dev/fdt/fdtvar.h>
#include <dev/fdt/fdt_port.h>

#include <arm/rockchip/rk_drm.h>

#define	RK_DRM_MAX_WIDTH	3840
#define	RK_DRM_MAX_HEIGHT	2160

static TAILQ_HEAD(, rk_drm_ports) rk_drm_ports =
    TAILQ_HEAD_INITIALIZER(rk_drm_ports);

static const char * const compatible[] = {
	"rockchip,display-subsystem",
	NULL
};

static const char * fb_compatible[] = {
	"simple-framebuffer",
	NULL
};

static int	rk_drm_match(device_t, cfdata_t, void *);
static void	rk_drm_attach(device_t, device_t, void *);

static void	rk_drm_init(device_t);
static vmem_t	*rk_drm_alloc_cma_pool(struct drm_device *, size_t);

static int	rk_drm_set_busid(struct drm_device *, struct drm_master *);

static uint32_t	rk_drm_get_vblank_counter(struct drm_device *, unsigned int);
static int	rk_drm_enable_vblank(struct drm_device *, unsigned int);
static void	rk_drm_disable_vblank(struct drm_device *, unsigned int);

static int	rk_drm_load(struct drm_device *, unsigned long);
static int	rk_drm_unload(struct drm_device *);

static struct drm_driver rk_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
	.dev_priv_size = 0,
	.load = rk_drm_load,
	.unload = rk_drm_unload,

	.gem_free_object = drm_gem_cma_free_object,
	.mmap_object = drm_gem_or_legacy_mmap_object,
	.gem_uvm_ops = &drm_gem_cma_uvm_ops,

	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,

	.get_vblank_counter = rk_drm_get_vblank_counter,
	.enable_vblank = rk_drm_enable_vblank,
	.disable_vblank = rk_drm_disable_vblank,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,

	.set_busid = rk_drm_set_busid,
};

CFATTACH_DECL_NEW(rk_drm, sizeof(struct rk_drm_softc),
	rk_drm_match, rk_drm_attach, NULL, NULL);

static int
rk_drm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_match_compatible(faa->faa_phandle, compatible);
}

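/*
 * Attach the display subsystem: save the FDT and bus handles, allocate the
 * DRM device, remove any firmware-provided "simple-framebuffer" nodes, and
 * defer the remaining initialization until all devices have attached.
 */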
static void
rk_drm_attach(device_t parent, device_t self, void *aux)
{
	struct rk_drm_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	struct drm_driver * const driver = &rk_drm_driver;
	prop_dictionary_t dict = device_properties(self);
	bool is_disabled;

	sc->sc_dev = self;
	sc->sc_dmat = faa->faa_dmat;
	sc->sc_bst = faa->faa_bst;
	sc->sc_phandle = faa->faa_phandle;

	drm_debug = 0xff;

	aprint_naive("\n");

	if (prop_dictionary_get_bool(dict, "disabled", &is_disabled) && is_disabled) {
		aprint_normal(": (disabled)\n");
		return;
	}

	aprint_normal("\n");

	sc->sc_ddev = drm_dev_alloc(driver, sc->sc_dev);
	if (sc->sc_ddev == NULL) {
		aprint_error_dev(self, "couldn't allocate DRM device\n");
		return;
	}
	sc->sc_ddev->dev_private = sc;
	sc->sc_ddev->bst = sc->sc_bst;
	sc->sc_ddev->bus_dmat = sc->sc_dmat;
	sc->sc_ddev->dmat = sc->sc_ddev->bus_dmat;
	sc->sc_ddev->dmat_subregion_p = false;

	fdt_remove_bycompat(fb_compatible);

	config_defer(self, rk_drm_init);
}

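/*
 * Deferred initialization: register the DRM device with the core and
 * report the driver name and version on success.
 */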
static void
rk_drm_init(device_t dev)
{
	struct rk_drm_softc * const sc = device_private(dev);
	struct drm_driver * const driver = &rk_drm_driver;
	int error;

	error = -drm_dev_register(sc->sc_ddev, 0);
	if (error) {
		drm_dev_unref(sc->sc_ddev);
		aprint_error_dev(dev, "couldn't register DRM device: %d\n",
		    error);
		return;
	}

	aprint_normal_dev(dev, "initialized %s %d.%d.%d %s on minor %d\n",
	    driver->name, driver->major, driver->minor, driver->patchlevel,
	    driver->date, sc->sc_ddev->primary->index);
}

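/*
 * Allocate a physically contiguous DMA segment of cma_size bytes and wrap
 * it in a vmem arena that CMA-backed GEM objects are carved out of.
 */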
static vmem_t *
rk_drm_alloc_cma_pool(struct drm_device *ddev, size_t cma_size)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);
	bus_dma_segment_t segs[1];
	int nsegs;
	int error;

	error = bus_dmamem_alloc(sc->sc_dmat, cma_size, PAGE_SIZE, 0,
	    segs, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "couldn't allocate CMA pool\n");
		return NULL;
	}

	return vmem_create("rkdrm", segs[0].ds_addr, segs[0].ds_len,
	    PAGE_SIZE, NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
}

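/*
 * Set the DRM master's unique bus ID to "platform:rk:<unit>".
 */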
static int
rk_drm_set_busid(struct drm_device *ddev, struct drm_master *master)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);
	char id[32];

	snprintf(id, sizeof(id), "platform:rk:%u", device_unit(sc->sc_dev));

	master->unique = kzalloc(strlen(id) + 1, GFP_KERNEL);
	if (master->unique == NULL)
		return -ENOMEM;
	strcpy(master->unique, id);
	master->unique_len = strlen(master->unique);

	return 0;
}

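/*
 * Framebuffer callbacks: create a GEM handle for the object backing a
 * framebuffer, and tear a framebuffer down again.
 */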
static int
rk_drm_fb_create_handle(struct drm_framebuffer *fb,
    struct drm_file *file, unsigned int *handle)
{
	struct rk_drm_framebuffer *sfb = to_rk_drm_framebuffer(fb);

	return drm_gem_handle_create(file, &sfb->obj->base, handle);
}

static void
rk_drm_fb_destroy(struct drm_framebuffer *fb)
{
	struct rk_drm_framebuffer *sfb = to_rk_drm_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	drm_gem_object_unreference_unlocked(&sfb->obj->base);
	kmem_free(sfb, sizeof(*sfb));
}

static const struct drm_framebuffer_funcs rk_drm_framebuffer_funcs = {
	.create_handle = rk_drm_fb_create_handle,
	.destroy = rk_drm_fb_destroy,
};

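/*
 * Create a framebuffer from a userland mode-setting request: look up the
 * GEM object backing the first plane and copy the pitch, offset, and
 * format parameters into a new rk_drm_framebuffer.
 */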
static struct drm_framebuffer *
rk_drm_fb_create(struct drm_device *ddev, struct drm_file *file,
    struct drm_mode_fb_cmd2 *cmd)
{
	struct rk_drm_framebuffer *fb;
	struct drm_gem_object *gem_obj;
	int error;

	if (cmd->flags)
		return NULL;

	gem_obj = drm_gem_object_lookup(ddev, file, cmd->handles[0]);
	if (gem_obj == NULL)
		return NULL;

	fb = kmem_zalloc(sizeof(*fb), KM_SLEEP);
	fb->obj = to_drm_gem_cma_obj(gem_obj);
	fb->base.pitches[0] = cmd->pitches[0];
	fb->base.pitches[1] = cmd->pitches[1];
	fb->base.pitches[2] = cmd->pitches[2];
	fb->base.offsets[0] = cmd->offsets[0];
	fb->base.offsets[1] = cmd->offsets[1];
	fb->base.offsets[2] = cmd->offsets[2];
	fb->base.width = cmd->width;
	fb->base.height = cmd->height;
	fb->base.pixel_format = cmd->pixel_format;
	fb->base.bits_per_pixel = drm_format_plane_cpp(fb->base.pixel_format, 0) * 8;

	switch (fb->base.pixel_format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		fb->base.depth = 32;
		break;
	default:
		break;
	}

	error = drm_framebuffer_init(ddev, &fb->base, &rk_drm_framebuffer_funcs);
	if (error != 0)
		goto dealloc;

	return &fb->base;

dealloc:
	drm_framebuffer_cleanup(&fb->base);
	kmem_free(fb, sizeof(*fb));
	drm_gem_object_unreference_unlocked(gem_obj);

	return NULL;
}

static struct drm_mode_config_funcs rk_drm_mode_config_funcs = {
	.fb_create = rk_drm_fb_create,
};

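/*
 * fbdev helper probe: size and allocate the CMA pool, create a CMA GEM
 * object for the console framebuffer, initialize the framebuffer, and
 * attach the framebuffer console device on the "rkfbbus" interface.
 */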
static int
rk_drm_fb_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes)
{
	struct rk_drm_softc * const sc = rk_drm_private(helper->dev);
	struct drm_device *ddev = helper->dev;
	struct rk_drm_framebuffer *sfb = to_rk_drm_framebuffer(helper->fb);
	struct drm_framebuffer *fb = helper->fb;
	struct rk_drmfb_attach_args sfa;
	size_t cma_size;
	int error;

	const u_int width = sizes->surface_width;
	const u_int height = sizes->surface_height;
	const u_int pitch = width * (32 / 8);

	const size_t size = roundup(height * pitch, PAGE_SIZE);

	/* Reserve enough memory for the FB console plus a 4K plane, rounded to 1MB */
	cma_size = size;
	cma_size += (RK_DRM_MAX_WIDTH * RK_DRM_MAX_HEIGHT * 4);
	cma_size = roundup(cma_size, 1024 * 1024);
	sc->sc_ddev->cma_pool = rk_drm_alloc_cma_pool(sc->sc_ddev, cma_size);
	if (sc->sc_ddev->cma_pool != NULL)
		aprint_normal_dev(sc->sc_dev, "reserved %u MB DRAM for CMA\n",
		    (u_int)(cma_size / (1024 * 1024)));

	sfb->obj = drm_gem_cma_create(ddev, size);
	if (sfb->obj == NULL) {
		DRM_ERROR("failed to allocate memory for framebuffer\n");
		return -ENOMEM;
	}

	fb->pitches[0] = pitch;
	fb->offsets[0] = 0;
	fb->width = width;
	fb->height = height;
	fb->pixel_format = DRM_FORMAT_XRGB8888;
	drm_fb_get_bpp_depth(fb->pixel_format, &fb->depth, &fb->bits_per_pixel);

	error = drm_framebuffer_init(ddev, fb, &rk_drm_framebuffer_funcs);
	if (error != 0) {
		DRM_ERROR("failed to initialize framebuffer\n");
		return error;
	}

	memset(&sfa, 0, sizeof(sfa));
	sfa.sfa_drm_dev = ddev;
	sfa.sfa_fb_helper = helper;
	sfa.sfa_fb_sizes = *sizes;
	sfa.sfa_fb_bst = sc->sc_bst;
	sfa.sfa_fb_dmat = sc->sc_dmat;
	sfa.sfa_fb_linebytes = helper->fb->pitches[0];

	helper->fbdev = config_found_ia(ddev->dev, "rkfbbus", &sfa, NULL);
	if (helper->fbdev == NULL) {
		DRM_ERROR("unable to attach framebuffer\n");
		return -ENXIO;
	}

	return 0;
}

static struct drm_fb_helper_funcs rk_drm_fb_helper_funcs = {
	.fb_probe = rk_drm_fb_probe,
};

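/*
 * DRM load callback: set up the mode configuration limits, activate the
 * endpoints of every registered port listed in the "ports" property, then
 * bring up the fbdev helper and vblank support.
 */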
static int
rk_drm_load(struct drm_device *ddev, unsigned long flags)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);
	struct rk_drm_ports *sport;
	struct rk_drm_fbdev *fbdev;
	struct fdt_endpoint *ep;
	const u_int *data;
	int datalen, error, num_crtc, ep_index;

	drm_mode_config_init(ddev);
	ddev->mode_config.min_width = 0;
	ddev->mode_config.min_height = 0;
	ddev->mode_config.max_width = RK_DRM_MAX_WIDTH;
	ddev->mode_config.max_height = RK_DRM_MAX_HEIGHT;
	ddev->mode_config.funcs = &rk_drm_mode_config_funcs;

	num_crtc = 0;
	data = fdtbus_get_prop(sc->sc_phandle, "ports", &datalen);
	while (datalen >= 4) {
		const int crtc_phandle = fdtbus_get_phandle_from_native(be32dec(data));

		TAILQ_FOREACH(sport, &rk_drm_ports, entries)
			if (sport->phandle == crtc_phandle && sport->ddev == NULL) {
				sport->ddev = ddev;
				for (ep_index = 0; (ep = fdt_endpoint_get_from_index(sport->port, 0, ep_index)) != NULL; ep_index++) {
					error = fdt_endpoint_activate_direct(ep, true);
					if (error != 0)
						aprint_debug_dev(sc->sc_dev,
						    "failed to activate endpoint %d: %d\n",
						    ep_index, error);
				}
				num_crtc++;
			}

		datalen -= 4;
		data++;
	}

	if (num_crtc == 0) {
		aprint_error_dev(sc->sc_dev, "no display interface ports configured\n");
		return ENXIO;
	}

	fbdev = kmem_zalloc(sizeof(*fbdev), KM_SLEEP);

	drm_fb_helper_prepare(ddev, &fbdev->helper, &rk_drm_fb_helper_funcs);

	error = drm_fb_helper_init(ddev, &fbdev->helper, num_crtc, num_crtc);
	if (error)
		goto drmerr;

	fbdev->helper.fb = kmem_zalloc(sizeof(struct rk_drm_framebuffer), KM_SLEEP);

	drm_fb_helper_single_add_all_connectors(&fbdev->helper);

	drm_helper_disable_unused_functions(ddev);

	drm_fb_helper_initial_config(&fbdev->helper, 32);

	/* XXX */
	ddev->irq_enabled = true;
	drm_vblank_init(ddev, num_crtc);

	return 0;

drmerr:
	drm_mode_config_cleanup(ddev);
	kmem_free(fbdev, sizeof(*fbdev));

	return error;
}

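/*
 * Vblank callbacks: dispatch to the per-CRTC hooks stored in sc_vbl;
 * a missing hook is treated as a no-op.
 */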
static uint32_t
rk_drm_get_vblank_counter(struct drm_device *ddev, unsigned int crtc)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].get_vblank_counter == NULL)
		return 0;

	return sc->sc_vbl[crtc].get_vblank_counter(sc->sc_vbl[crtc].priv);
}

static int
rk_drm_enable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].enable_vblank == NULL)
		return 0;

	sc->sc_vbl[crtc].enable_vblank(sc->sc_vbl[crtc].priv);

	return 0;
}

static void
rk_drm_disable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return;

	if (sc->sc_vbl[crtc].disable_vblank == NULL)
		return;

	sc->sc_vbl[crtc].disable_vblank(sc->sc_vbl[crtc].priv);
}

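/*
 * DRM unload callback: tear down the mode configuration state.
 */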
static int
rk_drm_unload(struct drm_device *ddev)
{
	drm_mode_config_cleanup(ddev);

	return 0;
}

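/*
 * Register an FDT display port with the subsystem; rk_drm_load() later
 * matches registered ports against the "ports" property.
 */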
int
rk_drm_register_port(int phandle, struct fdt_device_ports *port)
{
	struct rk_drm_ports *sport;

	sport = kmem_zalloc(sizeof(*sport), KM_SLEEP);
	sport->phandle = phandle;
	sport->port = port;
	sport->ddev = NULL;
	TAILQ_INSERT_TAIL(&rk_drm_ports, sport, entries);

	return 0;
}

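/*
 * Return the DRM device associated with a previously registered port.
 */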
struct drm_device *
rk_drm_port_device(struct fdt_device_ports *port)
{
	struct rk_drm_ports *sport;

	TAILQ_FOREACH(sport, &rk_drm_ports, entries)
		if (sport->port == port)
			return sport->ddev;

	return NULL;
}