/* $NetBSD: rk_drm.c,v 1.18 2021/12/20 00:27:17 riastradh Exp $ */

/*-
 * Copyright (c) 2019 Jared D. McNeill <jmcneill (at) invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rk_drm.c,v 1.18 2021/12/20 00:27:17 riastradh Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <uvm/uvm_device.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

#include <dev/fdt/fdt_port.h>
#include <dev/fdt/fdtvar.h>

#include <arm/rockchip/rk_drm.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_auth.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

#define	RK_DRM_MAX_WIDTH	3840
#define	RK_DRM_MAX_HEIGHT	2160

static TAILQ_HEAD(, rk_drm_ports) rk_drm_ports =
    TAILQ_HEAD_INITIALIZER(rk_drm_ports);

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "rockchip,display-subsystem" },
	DEVICE_COMPAT_EOL
};

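/*
 * Firmware-provided framebuffer nodes with these compatible strings
 * are removed from the device tree at attach time (see
 * fdt_remove_bycompat() below), so the simple framebuffer driver does
 * not keep claiming the display once this driver takes over scanout.
 */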
static const char * fb_compatible[] = {
	"simple-framebuffer",
	NULL
};

static int	rk_drm_match(device_t, cfdata_t, void *);
static void	rk_drm_attach(device_t, device_t, void *);

static void	rk_drm_init(device_t);
static vmem_t	*rk_drm_alloc_cma_pool(struct drm_device *, size_t);

static int	rk_drm_load(struct drm_device *, unsigned long);
static void	rk_drm_unload(struct drm_device *);

static void	rk_drm_task_work(struct work *, void *);

static struct drm_driver rk_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
	.dev_priv_size = 0,
	.load = rk_drm_load,
	.unload = rk_drm_unload,

	.gem_free_object = drm_gem_cma_free_object,
	.mmap_object = drm_gem_or_legacy_mmap_object,
	.gem_uvm_ops = &drm_gem_cma_uvm_ops,

	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_destroy = drm_gem_dumb_destroy,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
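
/*
 * Userland reaches the hooks above through the generic DRM ioctls.
 * A minimal sketch (hypothetical client of /dev/dri/card0, error
 * handling omitted) of a dumb-buffer allocation, which lands in
 * drm_gem_cma_dumb_create above:
 *
 *	struct drm_mode_create_dumb req = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &req);
 *	-> req.handle, req.pitch, and req.size are now filled in
 */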

CFATTACH_DECL_NEW(rk_drm, sizeof(struct rk_drm_softc),
	rk_drm_match, rk_drm_attach, NULL, NULL);

static int
rk_drm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_compatible_match(faa->faa_phandle, compat_data);
}

static void
rk_drm_attach(device_t parent, device_t self, void *aux)
{
	struct rk_drm_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	struct drm_driver * const driver = &rk_drm_driver;
	prop_dictionary_t dict = device_properties(self);
	bool is_disabled;

	aprint_naive("\n");

	if (prop_dictionary_get_bool(dict, "disabled", &is_disabled) &&
	    is_disabled) {
		aprint_normal(": (disabled)\n");
		return;
	}

	aprint_normal("\n");

	sc->sc_dev = self;
	sc->sc_dmat = faa->faa_dmat;
	sc->sc_bst = faa->faa_bst;
	sc->sc_phandle = faa->faa_phandle;
	sc->sc_task_thread = NULL;
	SIMPLEQ_INIT(&sc->sc_tasks);
	if (workqueue_create(&sc->sc_task_wq, "rkdrm",
	    &rk_drm_task_work, NULL, PRI_NONE, IPL_NONE, WQ_MPSAFE)) {
		aprint_error_dev(self, "unable to create workqueue\n");
		sc->sc_task_wq = NULL;
		return;
	}

	sc->sc_ddev = drm_dev_alloc(driver, sc->sc_dev);
	if (IS_ERR(sc->sc_ddev)) {
		aprint_error_dev(self, "couldn't allocate DRM device\n");
		return;
	}
	sc->sc_ddev->dev_private = sc;
	sc->sc_ddev->bst = sc->sc_bst;
	sc->sc_ddev->bus_dmat = sc->sc_dmat;
	sc->sc_ddev->dmat = sc->sc_ddev->bus_dmat;
	sc->sc_ddev->dmat_subregion_p = false;

	fdt_remove_bycompat(fb_compatible);

	/*
	 * Wait until rk_vop is attached as a sibling to this device --
	 * we need that to actually display our framebuffer.
	 */
	config_defer(self, rk_drm_init);
}

static void
rk_drm_init(device_t dev)
{
	struct rk_drm_softc * const sc = device_private(dev);
	struct drm_driver * const driver = &rk_drm_driver;
	int error;

	/*
	 * Cause any tasks issued synchronously during attach to be
	 * processed at the end of this function.
	 */
	sc->sc_task_thread = curlwp;

	error = -drm_dev_register(sc->sc_ddev, 0);
	if (error) {
		aprint_error_dev(dev, "couldn't register DRM device: %d\n",
		    error);
		goto out;
	}
	sc->sc_dev_registered = true;

	aprint_normal_dev(dev, "initialized %s %d.%d.%d %s on minor %d\n",
	    driver->name, driver->major, driver->minor, driver->patchlevel,
	    driver->date, sc->sc_ddev->primary->index);

	/*
	 * Process asynchronous tasks queued synchronously during
	 * attach.  This will be for display detection to attach a
	 * framebuffer, so we have the opportunity for a console device
	 * to attach before autoconf has completed, in time for init(8)
	 * to find that console without panicking.
	 */
	while (!SIMPLEQ_EMPTY(&sc->sc_tasks)) {
		struct rk_drm_task *const task = SIMPLEQ_FIRST(&sc->sc_tasks);

		SIMPLEQ_REMOVE_HEAD(&sc->sc_tasks, rdt_u.queue);
		(*task->rdt_fn)(task);
	}

out:	/* Cause any subsequent tasks to be processed by the workqueue.  */
	atomic_store_relaxed(&sc->sc_task_thread, NULL);
}

static vmem_t *
rk_drm_alloc_cma_pool(struct drm_device *ddev, size_t cma_size)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);
	bus_dma_segment_t segs[1];
	int nsegs;
	int error;

	error = bus_dmamem_alloc(sc->sc_dmat, cma_size, PAGE_SIZE, 0,
	    segs, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "couldn't allocate CMA pool\n");
		return NULL;
	}

	return vmem_create("rkdrm", segs[0].ds_addr, segs[0].ds_len,
	    PAGE_SIZE, NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
}
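
/*
 * The arena created above manages bus addresses only; nothing is
 * mapped here.  Consumers (the CMA GEM helpers) carve buffers out of
 * it, roughly as in this sketch (hypothetical caller, error handling
 * omitted):
 *
 *	vmem_addr_t busaddr;
 *	vmem_alloc(ddev->cma_pool, size, VM_SLEEP | VM_BESTFIT,
 *	    &busaddr);
 *	-> busaddr lies inside the segment reserved by
 *	   bus_dmamem_alloc() above
 */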

static int
rk_drm_fb_create_handle(struct drm_framebuffer *fb,
    struct drm_file *file, unsigned int *handle)
{
	struct rk_drm_framebuffer *sfb = to_rk_drm_framebuffer(fb);

	return drm_gem_handle_create(file, &sfb->obj->base, handle);
}

static void
rk_drm_fb_destroy(struct drm_framebuffer *fb)
{
	struct rk_drm_framebuffer *sfb = to_rk_drm_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	drm_gem_object_put_unlocked(&sfb->obj->base);
	kmem_free(sfb, sizeof(*sfb));
}

static const struct drm_framebuffer_funcs rk_drm_framebuffer_funcs = {
	.create_handle = rk_drm_fb_create_handle,
	.destroy = rk_drm_fb_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};

static struct drm_framebuffer *
rk_drm_fb_create(struct drm_device *ddev, struct drm_file *file,
    const struct drm_mode_fb_cmd2 *cmd)
{
	struct rk_drm_framebuffer *fb;
	struct drm_gem_object *gem_obj;
	int error;

	if (cmd->flags)
		return NULL;

	gem_obj = drm_gem_object_lookup(file, cmd->handles[0]);
	if (gem_obj == NULL)
		return NULL;

	fb = kmem_zalloc(sizeof(*fb), KM_SLEEP);
	drm_helper_mode_fill_fb_struct(ddev, &fb->base, cmd);
	fb->obj = to_drm_gem_cma_obj(gem_obj);

	error = drm_framebuffer_init(ddev, &fb->base, &rk_drm_framebuffer_funcs);
	if (error != 0)
		goto dealloc;

	return &fb->base;

dealloc:
	drm_framebuffer_cleanup(&fb->base);
	kmem_free(fb, sizeof(*fb));
	drm_gem_object_put_unlocked(gem_obj);

	return NULL;
}

static struct drm_mode_config_funcs rk_drm_mode_config_funcs = {
	.fb_create = rk_drm_fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static struct drm_mode_config_helper_funcs rk_drm_mode_config_helper_funcs = {
	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};

static int
rk_drm_fb_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes)
{
	struct rk_drm_softc * const sc = rk_drm_private(helper->dev);
	struct drm_device *ddev = helper->dev;
	struct rk_drm_framebuffer *sfb = to_rk_drm_framebuffer(helper->fb);
	struct drm_framebuffer *fb = helper->fb;
	struct rk_drmfb_attach_args sfa;
	size_t cma_size;
	int error;

	const u_int width = sizes->surface_width;
	const u_int height = sizes->surface_height;
	const u_int pitch = width * (32 / 8);

	const size_t size = roundup(height * pitch, PAGE_SIZE);

	/*
	 * Reserve enough memory for the framebuffer console plus a full
	 * 4K plane (3840 x 2160 x 4 bytes, roughly 32 MB), rounded up
	 * to a 1 MB boundary.
	 */
	cma_size = size;
	cma_size += (RK_DRM_MAX_WIDTH * RK_DRM_MAX_HEIGHT * 4);
	cma_size = roundup(cma_size, 1024 * 1024);
	sc->sc_ddev->cma_pool = rk_drm_alloc_cma_pool(sc->sc_ddev, cma_size);
	if (sc->sc_ddev->cma_pool != NULL)
		aprint_normal_dev(sc->sc_dev, "reserved %u MB DRAM for CMA\n",
		    (u_int)(cma_size / (1024 * 1024)));

	sfb->obj = drm_gem_cma_create(ddev, size);
	if (sfb->obj == NULL) {
		DRM_ERROR("failed to allocate memory for framebuffer\n");
		return -ENOMEM;
	}

	/* similar to drm_helper_mode_fill_fb_struct(), but we have no cmd */
	fb->pitches[0] = pitch;
	fb->offsets[0] = 0;
	fb->width = width;
	fb->height = height;
	fb->modifier = 0;
	fb->flags = 0;
#ifdef __ARM_BIG_ENDIAN
	fb->format = drm_format_info(DRM_FORMAT_BGRX8888);
#else
	fb->format = drm_format_info(DRM_FORMAT_XRGB8888);
#endif
	fb->dev = ddev;

	error = drm_framebuffer_init(ddev, fb, &rk_drm_framebuffer_funcs);
	if (error != 0) {
		DRM_ERROR("failed to initialize framebuffer\n");
		return error;
	}

	memset(&sfa, 0, sizeof(sfa));
	sfa.sfa_drm_dev = ddev;
	sfa.sfa_fb_helper = helper;
	sfa.sfa_fb_sizes = *sizes;
	sfa.sfa_fb_bst = sc->sc_bst;
	sfa.sfa_fb_dmat = sc->sc_dmat;
	sfa.sfa_fb_linebytes = helper->fb->pitches[0];

	helper->fbdev = config_found(ddev->dev, &sfa, NULL,
	    CFARGS(.iattr = "rkfbbus"));
	if (helper->fbdev == NULL) {
		DRM_ERROR("unable to attach framebuffer\n");
		return -ENXIO;
	}

	return 0;
}

static struct drm_fb_helper_funcs rk_drm_fb_helper_funcs = {
	.fb_probe = rk_drm_fb_probe,
};

static int
rk_drm_load(struct drm_device *ddev, unsigned long flags)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);
	struct rk_drm_ports *sport;
	struct rk_drm_fbdev *fbdev;
	struct fdt_endpoint *ep;
	const u_int *data;
	int datalen, error, num_crtc, ep_index;

	drm_mode_config_init(ddev);
	ddev->mode_config.min_width = 0;
	ddev->mode_config.min_height = 0;
	ddev->mode_config.max_width = RK_DRM_MAX_WIDTH;
	ddev->mode_config.max_height = RK_DRM_MAX_HEIGHT;
	ddev->mode_config.funcs = &rk_drm_mode_config_funcs;
	ddev->mode_config.helper_private = &rk_drm_mode_config_helper_funcs;

	num_crtc = 0;
	data = fdtbus_get_prop(sc->sc_phandle, "ports", &datalen);
	while (datalen >= 4) {
		const int crtc_phandle = fdtbus_get_phandle_from_native(be32dec(data));

		TAILQ_FOREACH(sport, &rk_drm_ports, entries)
			if (sport->phandle == crtc_phandle && sport->ddev == NULL) {
				sport->ddev = ddev;
				for (ep_index = 0; (ep = fdt_endpoint_get_from_index(sport->port, 0, ep_index)) != NULL; ep_index++) {
					error = fdt_endpoint_activate_direct(ep, true);
					if (error != 0)
						aprint_debug_dev(sc->sc_dev,
						    "failed to activate endpoint %d: %d\n",
						    ep_index, error);
				}
				num_crtc++;
			}

		datalen -= 4;
		data++;
	}

	if (num_crtc == 0) {
		aprint_error_dev(sc->sc_dev, "no display interface ports configured\n");
		error = ENXIO;
		goto drmerr;
	}

	drm_mode_config_reset(ddev);

	fbdev = kmem_zalloc(sizeof(*fbdev), KM_SLEEP);

	drm_fb_helper_prepare(ddev, &fbdev->helper, &rk_drm_fb_helper_funcs);

	error = drm_fb_helper_init(ddev, &fbdev->helper, num_crtc);
	if (error)
		goto allocerr;

	fbdev->helper.fb = kmem_zalloc(sizeof(struct rk_drm_framebuffer), KM_SLEEP);

	drm_fb_helper_single_add_all_connectors(&fbdev->helper);

	drm_fb_helper_initial_config(&fbdev->helper, 32);

	/* XXX Delegate this to rk_vop.c?  */
	ddev->irq_enabled = true;
	drm_vblank_init(ddev, num_crtc);

	return 0;

allocerr:
	kmem_free(fbdev, sizeof(*fbdev));
drmerr:
	drm_mode_config_cleanup(ddev);

	return error;
}

static void
rk_drm_unload(struct drm_device *ddev)
{
	drm_mode_config_cleanup(ddev);
}

int
rk_drm_register_port(int phandle, struct fdt_device_ports *port)
{
	struct rk_drm_ports *sport;

	sport = kmem_zalloc(sizeof(*sport), KM_SLEEP);
	sport->phandle = phandle;
	sport->port = port;
	sport->ddev = NULL;
	TAILQ_INSERT_TAIL(&rk_drm_ports, sport, entries);

	return 0;
}
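
/*
 * A hypothetical sketch of the expected caller: a display interface
 * driver such as rk_vop registers its FDT port at attach time, before
 * rk_drm_init() walks the "ports" property of the display subsystem
 * node:
 *
 *	static struct fdt_device_ports vop_ports;
 *
 *	vop_ports.port_ep_activate = vop_ep_activate;
 *	vop_ports.port_ep_get_data = vop_ep_get_data;
 *	fdt_ports_register(&vop_ports, self, phandle, EP_DRM_CRTC);
 *	rk_drm_register_port(phandle, &vop_ports);
 */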

struct drm_device *
rk_drm_port_device(struct fdt_device_ports *port)
{
	struct rk_drm_ports *sport;

	TAILQ_FOREACH(sport, &rk_drm_ports, entries)
		if (sport->port == port)
			return sport->ddev;

	return NULL;
}

static void
rk_drm_task_work(struct work *work, void *cookie)
{
	struct rk_drm_task *task = container_of(work, struct rk_drm_task,
	    rdt_u.work);

	(*task->rdt_fn)(task);
}

void
rk_task_init(struct rk_drm_task *task,
    void (*fn)(struct rk_drm_task *))
{

	task->rdt_fn = fn;
}

void
rk_task_schedule(device_t self, struct rk_drm_task *task)
{
	struct rk_drm_softc *sc = device_private(self);

	if (atomic_load_relaxed(&sc->sc_task_thread) == curlwp)
		SIMPLEQ_INSERT_TAIL(&sc->sc_tasks, task, rdt_u.queue);
	else
		workqueue_enqueue(sc->sc_task_wq, &task->rdt_u.work, NULL);
}
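
/*
 * Example task usage (hypothetical caller).  Code that can run either
 * during autoconf or later, e.g. on connector detection, queues work
 * like this; while rk_drm_init() is on the stack the task runs
 * synchronously from the loop there, afterwards it runs from the
 * "rkdrm" workqueue:
 *
 *	static void
 *	example_task_fn(struct rk_drm_task *task)
 *	{
 *		...runs in thread context...
 *	}
 *
 *	rk_task_init(&task, example_task_fn);
 *	rk_task_schedule(rkdrm_dev, &task);
 */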
    508