      1 /* $NetBSD: sunxi_drm.c,v 1.8 2019/11/05 23:31:23 jmcneill Exp $ */
      2 
      3 /*-
      4  * Copyright (c) 2019 Jared D. McNeill <jmcneill (at) invisible.ca>
      5  * All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  *
     16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
     21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
     22  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
     23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     26  * SUCH DAMAGE.
     27  */
     28 
     29 #include <sys/cdefs.h>
     30 __KERNEL_RCSID(0, "$NetBSD: sunxi_drm.c,v 1.8 2019/11/05 23:31:23 jmcneill Exp $");
     31 
     32 #include <sys/param.h>
     33 #include <sys/bus.h>
     34 #include <sys/device.h>
     35 #include <sys/intr.h>
     36 #include <sys/systm.h>
     37 #include <sys/kernel.h>
     38 #include <sys/conf.h>
     39 
     40 #include <uvm/uvm_extern.h>
     41 #include <uvm/uvm_object.h>
     42 #include <uvm/uvm_device.h>
     43 
     44 #include <drm/drmP.h>
     45 #include <drm/drm_crtc_helper.h>
     46 #include <drm/drm_fb_helper.h>
     47 
     48 #include <dev/fdt/fdtvar.h>
     49 #include <dev/fdt/fdt_port.h>
     50 
     51 #include <arm/sunxi/sunxi_drm.h>
     52 
     53 #define	SUNXI_DRM_MAX_WIDTH	3840
     54 #define	SUNXI_DRM_MAX_HEIGHT	2160
     55 
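/*
 * Pipeline endpoints (CRTCs, encoders) register themselves on this list
 * via sunxi_drm_register_endpoint() before the DRM device is loaded;
 * sunxi_drm_load() later claims the ones named in "allwinner,pipelines".
 */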
     56 static TAILQ_HEAD(, sunxi_drm_endpoint) sunxi_drm_endpoints =
     57     TAILQ_HEAD_INITIALIZER(sunxi_drm_endpoints);
     58 
     59 static const char * const compatible[] = {
     60 	"allwinner,sun8i-h3-display-engine",
     61 	"allwinner,sun50i-a64-display-engine",
     62 	NULL
     63 };
     64 
     65 static const char * fb_compatible[] = {
     66 	"allwinner,simple-framebuffer",
     67 	NULL
     68 };
     69 
     70 static int	sunxi_drm_match(device_t, cfdata_t, void *);
     71 static void	sunxi_drm_attach(device_t, device_t, void *);
     72 
     73 static void	sunxi_drm_init(device_t);
     74 static vmem_t	*sunxi_drm_alloc_cma_pool(struct drm_device *, size_t);
     75 
     76 static int	sunxi_drm_set_busid(struct drm_device *, struct drm_master *);
     77 
     78 static uint32_t	sunxi_drm_get_vblank_counter(struct drm_device *, unsigned int);
     79 static int	sunxi_drm_enable_vblank(struct drm_device *, unsigned int);
     80 static void	sunxi_drm_disable_vblank(struct drm_device *, unsigned int);
     81 
     82 static int	sunxi_drm_load(struct drm_device *, unsigned long);
     83 static int	sunxi_drm_unload(struct drm_device *);
     84 
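/* Mode-setting driver with CMA-backed GEM objects mapped through UVM. */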
     85 static struct drm_driver sunxi_drm_driver = {
     86 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
     87 	.dev_priv_size = 0,
     88 	.load = sunxi_drm_load,
     89 	.unload = sunxi_drm_unload,
     90 
     91 	.gem_free_object = drm_gem_cma_free_object,
     92 	.mmap_object = drm_gem_or_legacy_mmap_object,
     93 	.gem_uvm_ops = &drm_gem_cma_uvm_ops,
     94 
     95 	.dumb_create = drm_gem_cma_dumb_create,
     96 	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
     97 	.dumb_destroy = drm_gem_dumb_destroy,
     98 
     99 	.get_vblank_counter = sunxi_drm_get_vblank_counter,
    100 	.enable_vblank = sunxi_drm_enable_vblank,
    101 	.disable_vblank = sunxi_drm_disable_vblank,
    102 
    103 	.name = DRIVER_NAME,
    104 	.desc = DRIVER_DESC,
    105 	.date = DRIVER_DATE,
    106 	.major = DRIVER_MAJOR,
    107 	.minor = DRIVER_MINOR,
    108 	.patchlevel = DRIVER_PATCHLEVEL,
    109 
    110 	.set_busid = sunxi_drm_set_busid,
    111 };
    112 
    113 CFATTACH_DECL_NEW(sunxi_drm, sizeof(struct sunxi_drm_softc),
    114 	sunxi_drm_match, sunxi_drm_attach, NULL, NULL);
    115 
    116 static int
    117 sunxi_drm_match(device_t parent, cfdata_t cf, void *aux)
    118 {
    119 	struct fdt_attach_args * const faa = aux;
    120 
    121 	return of_match_compatible(faa->faa_phandle, compatible);
    122 }
    123 
    124 static void
    125 sunxi_drm_attach(device_t parent, device_t self, void *aux)
    126 {
    127 	struct sunxi_drm_softc * const sc = device_private(self);
    128 	struct fdt_attach_args * const faa = aux;
    129 	struct drm_driver * const driver = &sunxi_drm_driver;
    130 	prop_dictionary_t dict = device_properties(self);
    131 	bool is_disabled;
    132 
    133 	sc->sc_dev = self;
    134 	sc->sc_dmat = faa->faa_dmat;
    135 	sc->sc_bst = faa->faa_bst;
    136 	sc->sc_phandle = faa->faa_phandle;
    137 
    138 	aprint_naive("\n");
    139 
    140 	if (prop_dictionary_get_bool(dict, "disabled", &is_disabled) && is_disabled) {
    141 		aprint_normal(": Display Engine Pipeline (disabled)\n");
    142 		return;
    143 	}
    144 
    145 	aprint_normal(": Display Engine Pipeline\n");
    146 
    147 	sc->sc_ddev = drm_dev_alloc(driver, sc->sc_dev);
    148 	if (sc->sc_ddev == NULL) {
    149 		aprint_error_dev(self, "couldn't allocate DRM device\n");
    150 		return;
    151 	}
    152 	sc->sc_ddev->dev_private = sc;
    153 	sc->sc_ddev->bst = sc->sc_bst;
    154 	sc->sc_ddev->bus_dmat = sc->sc_dmat;
    155 	sc->sc_ddev->dmat = sc->sc_ddev->bus_dmat;
    156 	sc->sc_ddev->dmat_subregion_p = false;
    157 
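	/*
	 * Drop any firmware simple-framebuffer nodes so nothing else
	 * attaches to the display hardware this driver takes over.
	 */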
    158 	fdt_remove_bycompat(fb_compatible);
    159 
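	/*
	 * DRM registration needs the CRTC and encoder endpoints, which
	 * attach after us; defer initialization until autoconfiguration
	 * has settled.
	 */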
    160 	config_defer(self, sunxi_drm_init);
    161 }
    162 
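/*
 * Deferred attachment hook: register the DRM device once the rest of the
 * display pipeline has had a chance to attach.
 */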
    163 static void
    164 sunxi_drm_init(device_t dev)
    165 {
    166 	struct sunxi_drm_softc * const sc = device_private(dev);
    167 	struct drm_driver * const driver = &sunxi_drm_driver;
    168 	int error;
    169 
    170 	error = -drm_dev_register(sc->sc_ddev, 0);
    171 	if (error) {
    172 		drm_dev_unref(sc->sc_ddev);
    173 		aprint_error_dev(dev, "couldn't register DRM device: %d\n",
    174 		    error);
    175 		return;
    176 	}
    177 
    178 	aprint_normal_dev(dev, "initialized %s %d.%d.%d %s on minor %d\n",
    179 	    driver->name, driver->major, driver->minor, driver->patchlevel,
    180 	    driver->date, sc->sc_ddev->primary->index);
    181 }
    182 
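/*
 * Allocate one physically contiguous DMA segment of cma_size bytes and
 * wrap it in a vmem arena; GEM/CMA buffer objects are carved out of it.
 */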
    183 static vmem_t *
    184 sunxi_drm_alloc_cma_pool(struct drm_device *ddev, size_t cma_size)
    185 {
    186 	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
    187 	bus_dma_segment_t segs[1];
    188 	int nsegs;
    189 	int error;
    190 
    191 	error = bus_dmamem_alloc(sc->sc_dmat, cma_size, PAGE_SIZE, 0,
    192 	    segs, 1, &nsegs, BUS_DMA_NOWAIT);
    193 	if (error) {
    194 		aprint_error_dev(sc->sc_dev, "couldn't allocate CMA pool\n");
    195 		return NULL;
    196 	}
    197 
    198 	return vmem_create("sunxidrm", segs[0].ds_addr, segs[0].ds_len,
    199 	    PAGE_SIZE, NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
    200 }
    201 
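/* Identify the device to userland as "platform:sunxi:<unit>". */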
    202 static int
    203 sunxi_drm_set_busid(struct drm_device *ddev, struct drm_master *master)
    204 {
    205 	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
    206 	char id[32];
    207 
    208 	snprintf(id, sizeof(id), "platform:sunxi:%u", device_unit(sc->sc_dev));
    209 
    210 	master->unique = kzalloc(strlen(id) + 1, GFP_KERNEL);
    211 	if (master->unique == NULL)
    212 		return -ENOMEM;
    213 	strcpy(master->unique, id);
    214 	master->unique_len = strlen(master->unique);
    215 
    216 	return 0;
    217 }
    218 
    219 static int
    220 sunxi_drm_fb_create_handle(struct drm_framebuffer *fb,
    221     struct drm_file *file, unsigned int *handle)
    222 {
    223 	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(fb);
    224 
    225 	return drm_gem_handle_create(file, &sfb->obj->base, handle);
    226 }
    227 
    228 static void
    229 sunxi_drm_fb_destroy(struct drm_framebuffer *fb)
    230 {
    231 	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(fb);
    232 
    233 	drm_framebuffer_cleanup(fb);
    234 	drm_gem_object_unreference_unlocked(&sfb->obj->base);
    235 	kmem_free(sfb, sizeof(*sfb));
    236 }
    237 
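/* Framebuffer hooks: handle creation and teardown of the backing GEM object. */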
    238 static const struct drm_framebuffer_funcs sunxi_drm_framebuffer_funcs = {
    239 	.create_handle = sunxi_drm_fb_create_handle,
    240 	.destroy = sunxi_drm_fb_destroy,
    241 };
    242 
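/*
 * .fb_create: wrap the GEM object named by handles[0] in a
 * sunxi_drm_framebuffer.  A single buffer object backs all planes.
 */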
    243 static struct drm_framebuffer *
    244 sunxi_drm_fb_create(struct drm_device *ddev, struct drm_file *file,
    245     struct drm_mode_fb_cmd2 *cmd)
    246 {
    247 	struct sunxi_drm_framebuffer *fb;
    248 	struct drm_gem_object *gem_obj;
    249 	int error;
    250 
    251 	if (cmd->flags)
    252 		return NULL;
    253 
    254 	gem_obj = drm_gem_object_lookup(ddev, file, cmd->handles[0]);
    255 	if (gem_obj == NULL)
    256 		return NULL;
    257 
    258 	fb = kmem_zalloc(sizeof(*fb), KM_SLEEP);
    259 	fb->obj = to_drm_gem_cma_obj(gem_obj);
    260 	fb->base.pitches[0] = cmd->pitches[0];
    261 	fb->base.pitches[1] = cmd->pitches[1];
    262 	fb->base.pitches[2] = cmd->pitches[2];
    263 	fb->base.offsets[0] = cmd->offsets[0];
     264 	fb->base.offsets[1] = cmd->offsets[1];
     265 	fb->base.offsets[2] = cmd->offsets[2];
    266 	fb->base.width = cmd->width;
    267 	fb->base.height = cmd->height;
    268 	fb->base.pixel_format = cmd->pixel_format;
    269 	fb->base.bits_per_pixel = drm_format_plane_cpp(fb->base.pixel_format, 0) * 8;
    270 
    271 	switch (fb->base.pixel_format) {
    272 	case DRM_FORMAT_XRGB8888:
    273 	case DRM_FORMAT_ARGB8888:
    274 		fb->base.depth = 32;
    275 		break;
    276 	default:
    277 		break;
    278 	}
    279 
    280 	error = drm_framebuffer_init(ddev, &fb->base, &sunxi_drm_framebuffer_funcs);
    281 	if (error != 0)
    282 		goto dealloc;
    283 
    284 	return &fb->base;
    285 
    286 dealloc:
    287 	drm_framebuffer_cleanup(&fb->base);
    288 	kmem_free(fb, sizeof(*fb));
    289 	drm_gem_object_unreference_unlocked(gem_obj);
    290 
    291 	return NULL;
    292 }
    293 
    294 static struct drm_mode_config_funcs sunxi_drm_mode_config_funcs = {
    295 	.fb_create = sunxi_drm_fb_create,
    296 };
    297 
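/*
 * fbdev helper .fb_probe: size and allocate the CMA pool, create the
 * console framebuffer, and attach the framebuffer device on top of it.
 */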
    298 static int
    299 sunxi_drm_fb_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes)
    300 {
    301 	struct sunxi_drm_softc * const sc = sunxi_drm_private(helper->dev);
    302 	struct drm_device *ddev = helper->dev;
    303 	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(helper->fb);
    304 	struct drm_framebuffer *fb = helper->fb;
    305 	struct sunxi_drmfb_attach_args sfa;
    306 	size_t cma_size;
    307 	int error;
    308 
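	/* The console framebuffer is always configured as 32-bit XRGB8888. */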
    309 	const u_int width = sizes->surface_width;
    310 	const u_int height = sizes->surface_height;
    311 	const u_int pitch = width * (32 / 8);
    312 
    313 	const size_t size = roundup(height * pitch, PAGE_SIZE);
    314 
    315 	/* Reserve enough memory for the FB console plus a 4K plane, rounded to 1MB */
    316 	cma_size = size;
    317 	cma_size += (SUNXI_DRM_MAX_WIDTH * SUNXI_DRM_MAX_HEIGHT * 4);
    318 	cma_size = roundup(cma_size, 1024 * 1024);
    319 	sc->sc_ddev->cma_pool = sunxi_drm_alloc_cma_pool(sc->sc_ddev, cma_size);
    320 	if (sc->sc_ddev->cma_pool != NULL)
    321 		aprint_normal_dev(sc->sc_dev, "reserved %u MB DRAM for CMA\n",
    322 		    (u_int)(cma_size / (1024 * 1024)));
    323 
    324 	sfb->obj = drm_gem_cma_create(ddev, size);
    325 	if (sfb->obj == NULL) {
    326 		DRM_ERROR("failed to allocate memory for framebuffer\n");
    327 		return -ENOMEM;
    328 	}
    329 
    330 	fb->pitches[0] = pitch;
    331 	fb->offsets[0] = 0;
    332 	fb->width = width;
    333 	fb->height = height;
    334 	fb->pixel_format = DRM_FORMAT_XRGB8888;
    335 	drm_fb_get_bpp_depth(fb->pixel_format, &fb->depth, &fb->bits_per_pixel);
    336 
    337 	error = drm_framebuffer_init(ddev, fb, &sunxi_drm_framebuffer_funcs);
    338 	if (error != 0) {
    339 		DRM_ERROR("failed to initialize framebuffer\n");
    340 		return error;
    341 	}
    342 
    343 	memset(&sfa, 0, sizeof(sfa));
    344 	sfa.sfa_drm_dev = ddev;
    345 	sfa.sfa_fb_helper = helper;
    346 	sfa.sfa_fb_sizes = *sizes;
    347 	sfa.sfa_fb_bst = sc->sc_bst;
    348 	sfa.sfa_fb_dmat = sc->sc_dmat;
    349 	sfa.sfa_fb_linebytes = helper->fb->pitches[0];
    350 
    351 	helper->fbdev = config_found_ia(ddev->dev, "sunxifbbus", &sfa, NULL);
    352 	if (helper->fbdev == NULL) {
    353 		DRM_ERROR("unable to attach framebuffer\n");
    354 		return -ENXIO;
    355 	}
    356 
    357 	return 0;
    358 }
    359 
    360 static struct drm_fb_helper_funcs sunxi_drm_fb_helper_funcs = {
    361 	.fb_probe = sunxi_drm_fb_probe,
    362 };
    363 
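/*
 * .load: activate the endpoints listed in the "allwinner,pipelines"
 * property, then set up mode configuration, the fbdev console and
 * vblank handling.
 */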
    364 static int
    365 sunxi_drm_load(struct drm_device *ddev, unsigned long flags)
    366 {
    367 	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
    368 	struct sunxi_drm_endpoint *sep;
    369 	struct sunxi_drm_fbdev *fbdev;
    370 	const u_int *data;
    371 	int datalen, error, num_crtc;
    372 
    373 	drm_mode_config_init(ddev);
    374 	ddev->mode_config.min_width = 0;
    375 	ddev->mode_config.min_height = 0;
    376 	ddev->mode_config.max_width = SUNXI_DRM_MAX_WIDTH;
    377 	ddev->mode_config.max_height = SUNXI_DRM_MAX_HEIGHT;
    378 	ddev->mode_config.funcs = &sunxi_drm_mode_config_funcs;
    379 
    380 	num_crtc = 0;
    381 	data = fdtbus_get_prop(sc->sc_phandle, "allwinner,pipelines", &datalen);
    382 	while (datalen >= 4) {
    383 		const int crtc_phandle = fdtbus_get_phandle_from_native(be32dec(data));
    384 
    385 		TAILQ_FOREACH(sep, &sunxi_drm_endpoints, entries)
    386 			if (sep->phandle == crtc_phandle && sep->ddev == NULL) {
    387 				sep->ddev = ddev;
    388 				error = fdt_endpoint_activate_direct(sep->ep, true);
    389 				if (error != 0) {
    390 					aprint_error_dev(sc->sc_dev, "failed to activate endpoint: %d\n",
    391 					    error);
    392 				}
    393 				if (fdt_endpoint_type(sep->ep) == EP_DRM_CRTC)
    394 					num_crtc++;
    395 			}
    396 
    397 		datalen -= 4;
    398 		data++;
    399 	}
    400 
    401 	if (num_crtc == 0) {
    402 		aprint_error_dev(sc->sc_dev, "no pipelines configured\n");
    403 		return ENXIO;
    404 	}
    405 
    406 	fbdev = kmem_zalloc(sizeof(*fbdev), KM_SLEEP);
    407 
    408 	drm_fb_helper_prepare(ddev, &fbdev->helper, &sunxi_drm_fb_helper_funcs);
    409 
    410 	error = drm_fb_helper_init(ddev, &fbdev->helper, num_crtc, num_crtc);
    411 	if (error)
    412 		goto drmerr;
    413 
    414 	fbdev->helper.fb = kmem_zalloc(sizeof(struct sunxi_drm_framebuffer), KM_SLEEP);
    415 
    416 	drm_fb_helper_single_add_all_connectors(&fbdev->helper);
    417 
    418 	drm_helper_disable_unused_functions(ddev);
    419 
    420 	drm_fb_helper_initial_config(&fbdev->helper, 32);
    421 
    422 	/* XXX */
    423 	ddev->irq_enabled = true;
    424 	drm_vblank_init(ddev, num_crtc);
    425 
    426 	return 0;
    427 
    428 drmerr:
    429 	drm_mode_config_cleanup(ddev);
    430 	kmem_free(fbdev, sizeof(*fbdev));
    431 
    432 	return error;
    433 }
    434 
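/*
 * Vblank hooks: forward to the per-CRTC callbacks installed in sc_vbl[],
 * if any.
 */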
    435 static uint32_t
    436 sunxi_drm_get_vblank_counter(struct drm_device *ddev, unsigned int crtc)
    437 {
    438 	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
    439 
    440 	if (crtc >= __arraycount(sc->sc_vbl))
    441 		return 0;
    442 
    443 	if (sc->sc_vbl[crtc].get_vblank_counter == NULL)
    444 		return 0;
    445 
    446 	return sc->sc_vbl[crtc].get_vblank_counter(sc->sc_vbl[crtc].priv);
    447 }
    448 
    449 static int
    450 sunxi_drm_enable_vblank(struct drm_device *ddev, unsigned int crtc)
    451 {
    452 	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
    453 
    454 	if (crtc >= __arraycount(sc->sc_vbl))
    455 		return 0;
    456 
    457 	if (sc->sc_vbl[crtc].enable_vblank == NULL)
    458 		return 0;
    459 
    460 	sc->sc_vbl[crtc].enable_vblank(sc->sc_vbl[crtc].priv);
    461 
    462 	return 0;
    463 }
    464 
    465 static void
    466 sunxi_drm_disable_vblank(struct drm_device *ddev, unsigned int crtc)
    467 {
    468 	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
    469 
    470 	if (crtc >= __arraycount(sc->sc_vbl))
    471 		return;
    472 
    473 	if (sc->sc_vbl[crtc].disable_vblank == NULL)
    474 		return;
    475 
    476 	sc->sc_vbl[crtc].disable_vblank(sc->sc_vbl[crtc].priv);
    477 }
    478 
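/* .unload: release mode configuration state. */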
    479 static int
    480 sunxi_drm_unload(struct drm_device *ddev)
    481 {
    482 	drm_mode_config_cleanup(ddev);
    483 
    484 	return 0;
    485 }
    486 
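/*
 * Called by pipeline components at attach time; records the endpoint so
 * sunxi_drm_load() can activate it and associate it with a drm_device.
 */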
    487 int
    488 sunxi_drm_register_endpoint(int phandle, struct fdt_endpoint *ep)
    489 {
    490 	struct sunxi_drm_endpoint *sep;
    491 
    492 	sep = kmem_zalloc(sizeof(*sep), KM_SLEEP);
    493 	sep->phandle = phandle;
    494 	sep->ep = ep;
    495 	sep->ddev = NULL;
    496 	TAILQ_INSERT_TAIL(&sunxi_drm_endpoints, sep, entries);
    497 
    498 	return 0;
    499 }
    500 
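/* Look up the DRM device that claimed a given endpoint, or NULL if none. */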
    501 struct drm_device *
    502 sunxi_drm_endpoint_device(struct fdt_endpoint *ep)
    503 {
    504 	struct sunxi_drm_endpoint *sep;
    505 
    506 	TAILQ_FOREACH(sep, &sunxi_drm_endpoints, entries)
    507 		if (sep->ep == ep)
    508 			return sep->ddev;
    509 
    510 	return NULL;
    511 }
    512