/* $NetBSD: sunxi_drm.c,v 1.26 2022/09/25 07:50:23 riastradh Exp $ */

/*-
 * Copyright (c) 2019 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sunxi_drm.c,v 1.26 2022/09/25 07:50:23 riastradh Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <uvm/uvm_device.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

#include <dev/fdt/fdt_port.h>
#include <dev/fdt/fdtvar.h>

#include <arm/sunxi/sunxi_drm.h>

#include <drm/drm_auth.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

#define SUNXI_DRM_MAX_WIDTH	3840
#define SUNXI_DRM_MAX_HEIGHT	2160

/*
 * The DRM headers break trunc_page/round_page macros with a redefinition
 * of PAGE_MASK.  Use our own macros instead.
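 * For example, assuming the usual 4 KiB page size, SUNXI_TRUNC_PAGE(0x1234)
 * yields 0x1000 and SUNXI_ROUND_PAGE(0x1234) yields 0x2000.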
 */
#define SUNXI_PAGE_MASK		(PAGE_SIZE - 1)
#define SUNXI_TRUNC_PAGE(x)	((x) & ~SUNXI_PAGE_MASK)
#define SUNXI_ROUND_PAGE(x)	(((x) + SUNXI_PAGE_MASK) & ~SUNXI_PAGE_MASK)

static TAILQ_HEAD(, sunxi_drm_endpoint) sunxi_drm_endpoints =
    TAILQ_HEAD_INITIALIZER(sunxi_drm_endpoints);

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "allwinner,sun8i-h3-display-engine" },
	{ .compat = "allwinner,sun8i-v3s-display-engine" },
	{ .compat = "allwinner,sun50i-a64-display-engine" },
	DEVICE_COMPAT_EOL
};

static const char * fb_compatible[] = {
	"allwinner,simple-framebuffer",
	NULL
};

static int	sunxi_drm_match(device_t, cfdata_t, void *);
static void	sunxi_drm_attach(device_t, device_t, void *);

static void	sunxi_drm_init(device_t);
static vmem_t	*sunxi_drm_alloc_cma_pool(struct drm_device *, size_t);

static uint32_t	sunxi_drm_get_vblank_counter(struct drm_device *, unsigned int);
static int	sunxi_drm_enable_vblank(struct drm_device *, unsigned int);
static void	sunxi_drm_disable_vblank(struct drm_device *, unsigned int);

static int	sunxi_drm_load(struct drm_device *, unsigned long);
static void	sunxi_drm_unload(struct drm_device *);

static void	sunxi_drm_task_work(struct work *, void *);

static struct drm_driver sunxi_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM,
	.dev_priv_size = 0,
	.load = sunxi_drm_load,
	.unload = sunxi_drm_unload,

	.gem_free_object = drm_gem_cma_free_object,
	.mmap_object = drm_gem_or_legacy_mmap_object,
	.gem_uvm_ops = &drm_gem_cma_uvm_ops,

	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_destroy = drm_gem_dumb_destroy,

	.get_vblank_counter = sunxi_drm_get_vblank_counter,
	.enable_vblank = sunxi_drm_enable_vblank,
	.disable_vblank = sunxi_drm_disable_vblank,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

CFATTACH_DECL_NEW(sunxi_drm, sizeof(struct sunxi_drm_softc),
	sunxi_drm_match, sunxi_drm_attach, NULL, NULL);

static int
sunxi_drm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_compatible_match(faa->faa_phandle, compat_data);
}

static void
sunxi_drm_attach(device_t parent, device_t self, void *aux)
{
	struct sunxi_drm_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	struct drm_driver * const driver = &sunxi_drm_driver;
	prop_dictionary_t dict = device_properties(self);
	bool is_disabled;

	aprint_naive("\n");

	if (prop_dictionary_get_bool(dict, "disabled", &is_disabled) &&
	    is_disabled) {
		aprint_normal(": Display Engine Pipeline (disabled)\n");
		return;
	}

	aprint_normal(": Display Engine Pipeline\n");

#ifdef WSDISPLAY_MULTICONS
	const bool is_console = true;
	prop_dictionary_set_bool(dict, "is_console", is_console);
#endif

	sc->sc_dev = self;
	sc->sc_dmat = faa->faa_dmat;
	sc->sc_bst = faa->faa_bst;
	sc->sc_phandle = faa->faa_phandle;
	sc->sc_task_thread = NULL;
	SIMPLEQ_INIT(&sc->sc_tasks);
	if (workqueue_create(&sc->sc_task_wq, "sunxidrm",
	    &sunxi_drm_task_work, NULL, PRI_NONE, IPL_NONE, WQ_MPSAFE)) {
		aprint_error_dev(self, "unable to create workqueue\n");
		sc->sc_task_wq = NULL;
		return;
	}

	sc->sc_ddev = drm_dev_alloc(driver, sc->sc_dev);
	if (IS_ERR(sc->sc_ddev)) {
		aprint_error_dev(self, "couldn't allocate DRM device\n");
		return;
	}
	sc->sc_ddev->dev_private = sc;
	sc->sc_ddev->bst = sc->sc_bst;
	sc->sc_ddev->bus_dmat = sc->sc_dmat;
	sc->sc_ddev->dmat = sc->sc_ddev->bus_dmat;
	sc->sc_ddev->dmat_subregion_p = false;

	fdt_remove_bycompat(fb_compatible);

	config_defer(self, sunxi_drm_init);
}

static void
sunxi_drm_init(device_t dev)
{
	struct sunxi_drm_softc * const sc = device_private(dev);
	struct drm_driver * const driver = &sunxi_drm_driver;
	int error;

	/*
	 * Cause any tasks issued synchronously during attach to be
	 * processed at the end of this function.
	 */
	sc->sc_task_thread = curlwp;

	error = -drm_dev_register(sc->sc_ddev, 0);
	if (error) {
		aprint_error_dev(dev, "couldn't register DRM device: %d\n",
		    error);
		goto out;
	}
	sc->sc_dev_registered = true;

	aprint_normal_dev(dev, "initialized %s %d.%d.%d %s on minor %d\n",
	    driver->name, driver->major, driver->minor, driver->patchlevel,
	    driver->date, sc->sc_ddev->primary->index);

	/*
	 * Process asynchronous tasks queued synchronously during
	 * attach.  This will be for display detection to attach a
	 * framebuffer, so we have the opportunity for a console device
	 * to attach before autoconf has completed, in time for init(8)
	 * to find that console without panicking.
	 */
	while (!SIMPLEQ_EMPTY(&sc->sc_tasks)) {
		struct sunxi_drm_task *const task =
		    SIMPLEQ_FIRST(&sc->sc_tasks);

		SIMPLEQ_REMOVE_HEAD(&sc->sc_tasks, sdt_u.queue);
		(*task->sdt_fn)(task);
	}

out:	/* Cause any subsequent tasks to be processed by the workqueue. */
	atomic_store_relaxed(&sc->sc_task_thread, NULL);
}

static vmem_t *
sunxi_drm_alloc_cma_pool(struct drm_device *ddev, size_t cma_size)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
	bus_dma_segment_t segs[1];
	int nsegs;
	int error;

	error = bus_dmamem_alloc(sc->sc_dmat, cma_size, PAGE_SIZE, 0,
	    segs, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "couldn't allocate CMA pool\n");
		return NULL;
	}

	return vmem_create("sunxidrm", segs[0].ds_addr, segs[0].ds_len,
	    PAGE_SIZE, NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
}

static int
sunxi_drm_fb_create_handle(struct drm_framebuffer *fb,
    struct drm_file *file, unsigned int *handle)
{
	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(fb);

	return drm_gem_handle_create(file, &sfb->obj->base, handle);
}

static void
sunxi_drm_fb_destroy(struct drm_framebuffer *fb)
{
	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	drm_gem_object_put_unlocked(&sfb->obj->base);
	kmem_free(sfb, sizeof(*sfb));
}

static const struct drm_framebuffer_funcs sunxi_drm_framebuffer_funcs = {
	.create_handle = sunxi_drm_fb_create_handle,
	.destroy = sunxi_drm_fb_destroy,
};

static struct drm_framebuffer *
sunxi_drm_fb_create(struct drm_device *ddev, struct drm_file *file,
    const struct drm_mode_fb_cmd2 *cmd)
{
	struct sunxi_drm_framebuffer *fb;
	struct drm_gem_object *gem_obj;
	int error;

	if (cmd->flags)
		return NULL;

	gem_obj = drm_gem_object_lookup(file, cmd->handles[0]);
	if (gem_obj == NULL)
		return NULL;

	fb = kmem_zalloc(sizeof(*fb), KM_SLEEP);
	fb->obj = to_drm_gem_cma_obj(gem_obj);
	drm_helper_mode_fill_fb_struct(ddev, &fb->base, cmd);

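	/*
	 * Register the framebuffer with the DRM core; on failure the
	 * partially constructed framebuffer and its GEM object are
	 * released below.
	 */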
	error = drm_framebuffer_init(ddev, &fb->base,
	    &sunxi_drm_framebuffer_funcs);
	if (error != 0)
		goto dealloc;

	return &fb->base;

dealloc:
	drm_framebuffer_cleanup(&fb->base);
	kmem_free(fb, sizeof(*fb));
	drm_gem_object_put_unlocked(gem_obj);

	return NULL;
}

static struct drm_mode_config_funcs sunxi_drm_mode_config_funcs = {
	.fb_create = sunxi_drm_fb_create,
};

static int
sunxi_drm_simplefb_lookup(bus_addr_t *paddr, bus_size_t *psize)
{
	static const struct device_compatible_entry simplefb_compat[] = {
		{ .compat = "simple-framebuffer" },
		DEVICE_COMPAT_EOL
	};
	int chosen, child, error;
	bus_addr_t addr_end;

	chosen = OF_finddevice("/chosen");
	if (chosen == -1)
		return ENOENT;

	for (child = OF_child(chosen); child; child = OF_peer(child)) {
		if (!fdtbus_status_okay(child))
			continue;
		if (!of_compatible_match(child, simplefb_compat))
			continue;
		error = fdtbus_get_reg(child, 0, paddr, psize);
		if (error != 0)
			return error;

		/* Reclaim entire pages used by the simplefb */
		addr_end = *paddr + *psize;
		*paddr = SUNXI_TRUNC_PAGE(*paddr);
		*psize = SUNXI_ROUND_PAGE(addr_end - *paddr);
		return 0;
	}

	return ENOENT;
}

static int
sunxi_drm_fb_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(helper->dev);
	struct drm_device *ddev = helper->dev;
	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(helper->fb);
	struct drm_framebuffer *fb = helper->fb;
	struct sunxi_drmfb_attach_args sfa;
	bus_addr_t sfb_addr;
	bus_size_t sfb_size;
	size_t cma_size;
	int error;

	const u_int width = sizes->surface_width;
	const u_int height = sizes->surface_height;
	const u_int pitch = width * (32 / 8);

	const size_t size = roundup(height * pitch, PAGE_SIZE);

	if (sunxi_drm_simplefb_lookup(&sfb_addr, &sfb_size) != 0)
		sfb_size = 0;

	/* Reserve enough memory for a 4K plane, rounded to 1MB */
	cma_size = (SUNXI_DRM_MAX_WIDTH * SUNXI_DRM_MAX_HEIGHT * 4);
	if (sfb_size == 0) {
		/* Add memory for FB console if we cannot reclaim bootloader memory */
		cma_size += size;
	}
	cma_size = roundup(cma_size, 1024 * 1024);
	sc->sc_ddev->cma_pool = sunxi_drm_alloc_cma_pool(sc->sc_ddev, cma_size);
	if (sc->sc_ddev->cma_pool != NULL) {
		if (sfb_size != 0) {
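			/*
			 * Donate the page-aligned simplefb region reclaimed
			 * from the bootloader to the CMA pool so it can be
			 * reused for GEM allocations.
			 */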
			error = vmem_add(sc->sc_ddev->cma_pool, sfb_addr,
			    sfb_size, VM_SLEEP);
			if (error != 0)
				sfb_size = 0;
		}
		aprint_normal_dev(sc->sc_dev, "reserved %u MB DRAM for CMA",
		    (u_int)((cma_size + sfb_size) / (1024 * 1024)));
		if (sfb_size != 0)
			aprint_normal(" (%u MB reclaimed from bootloader)",
			    (u_int)(sfb_size / (1024 * 1024)));
		aprint_normal("\n");
	}

	sfb->obj = drm_gem_cma_create(ddev, size);
	if (sfb->obj == NULL) {
		DRM_ERROR("failed to allocate memory for framebuffer\n");
		return -ENOMEM;
	}

	fb->pitches[0] = pitch;
	fb->offsets[0] = 0;
	fb->width = width;
	fb->height = height;
	fb->format = drm_format_info(DRM_FORMAT_XRGB8888);
	fb->dev = ddev;

	error = drm_framebuffer_init(ddev, fb, &sunxi_drm_framebuffer_funcs);
	if (error != 0) {
		DRM_ERROR("failed to initialize framebuffer\n");
		return error;
	}

	memset(&sfa, 0, sizeof(sfa));
	sfa.sfa_drm_dev = ddev;
	sfa.sfa_fb_helper = helper;
	sfa.sfa_fb_sizes = *sizes;
	sfa.sfa_fb_bst = sc->sc_bst;
	sfa.sfa_fb_dmat = sc->sc_dmat;
	sfa.sfa_fb_linebytes = helper->fb->pitches[0];

	helper->fbdev = config_found(ddev->dev, &sfa, NULL,
	    CFARGS(.iattr = "sunxifbbus"));
	if (helper->fbdev == NULL) {
		DRM_ERROR("unable to attach framebuffer\n");
		return -ENXIO;
	}

	return 0;
}

static struct drm_fb_helper_funcs sunxi_drm_fb_helper_funcs = {
	.fb_probe = sunxi_drm_fb_probe,
};

static int
sunxi_drm_load(struct drm_device *ddev, unsigned long flags)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
	struct sunxi_drm_endpoint *sep;
	struct sunxi_drm_fbdev *fbdev;
	const u_int *data;
	int datalen, error, num_crtc;

	drm_mode_config_init(ddev);
	ddev->mode_config.min_width = 0;
	ddev->mode_config.min_height = 0;
	ddev->mode_config.max_width = SUNXI_DRM_MAX_WIDTH;
	ddev->mode_config.max_height = SUNXI_DRM_MAX_HEIGHT;
	ddev->mode_config.funcs = &sunxi_drm_mode_config_funcs;

	num_crtc = 0;
	data = fdtbus_get_prop(sc->sc_phandle, "allwinner,pipelines", &datalen);
	while (datalen >= 4) {
		const int crtc_phandle = fdtbus_get_phandle_from_native(be32dec(data));

		TAILQ_FOREACH(sep, &sunxi_drm_endpoints, entries)
			if (sep->phandle == crtc_phandle && sep->ddev == NULL) {
				sep->ddev = ddev;
				error = fdt_endpoint_activate_direct(sep->ep, true);
				if (error != 0) {
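					/*
					 * Activation failure is reported but
					 * not treated as fatal; the remaining
					 * endpoints are still probed.
					 */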
					aprint_error_dev(sc->sc_dev, "failed to activate endpoint: %d\n",
					    error);
				}
				if (fdt_endpoint_type(sep->ep) == EP_DRM_CRTC)
					num_crtc++;
			}

		datalen -= 4;
		data++;
	}

	if (num_crtc == 0) {
		aprint_error_dev(sc->sc_dev, "no pipelines configured\n");
		error = ENXIO;
		goto drmerr;
	}

	fbdev = kmem_zalloc(sizeof(*fbdev), KM_SLEEP);

	drm_fb_helper_prepare(ddev, &fbdev->helper, &sunxi_drm_fb_helper_funcs);

	error = drm_fb_helper_init(ddev, &fbdev->helper, num_crtc);
	if (error)
		goto allocerr;

	fbdev->helper.fb = kmem_zalloc(sizeof(struct sunxi_drm_framebuffer), KM_SLEEP);

	drm_fb_helper_single_add_all_connectors(&fbdev->helper);

	drm_helper_disable_unused_functions(ddev);

	drm_fb_helper_initial_config(&fbdev->helper, 32);

	/* XXX */
	ddev->irq_enabled = true;
	drm_vblank_init(ddev, num_crtc);

	return 0;

allocerr:
	kmem_free(fbdev, sizeof(*fbdev));
drmerr:
	drm_mode_config_cleanup(ddev);

	return error;
}

static uint32_t
sunxi_drm_get_vblank_counter(struct drm_device *ddev, unsigned int crtc)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].get_vblank_counter == NULL)
		return 0;

	return sc->sc_vbl[crtc].get_vblank_counter(sc->sc_vbl[crtc].priv);
}

static int
sunxi_drm_enable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].enable_vblank == NULL)
		return 0;

	sc->sc_vbl[crtc].enable_vblank(sc->sc_vbl[crtc].priv);

	return 0;
}

static void
sunxi_drm_disable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return;

	if (sc->sc_vbl[crtc].disable_vblank == NULL)
		return;

	sc->sc_vbl[crtc].disable_vblank(sc->sc_vbl[crtc].priv);
}

static void
sunxi_drm_unload(struct drm_device *ddev)
{
	drm_mode_config_cleanup(ddev);
}

int
sunxi_drm_register_endpoint(int phandle, struct fdt_endpoint *ep)
{
	struct sunxi_drm_endpoint *sep;

	sep = kmem_zalloc(sizeof(*sep), KM_SLEEP);
	sep->phandle = phandle;
	sep->ep = ep;
	sep->ddev = NULL;
	TAILQ_INSERT_TAIL(&sunxi_drm_endpoints, sep, entries);

	return 0;
}

struct drm_device *
sunxi_drm_endpoint_device(struct fdt_endpoint *ep)
{
	struct sunxi_drm_endpoint *sep;

	TAILQ_FOREACH(sep, &sunxi_drm_endpoints, entries)
		if (sep->ep == ep)
			return sep->ddev;

	return NULL;
}

static void
sunxi_drm_task_work(struct work *work, void *cookie)
{
	struct sunxi_drm_task *task = container_of(work, struct sunxi_drm_task,
	    sdt_u.work);

	(*task->sdt_fn)(task);
}

void
sunxi_task_init(struct sunxi_drm_task *task,
    void (*fn)(struct sunxi_drm_task *))
{

	task->sdt_fn = fn;
}

void
sunxi_task_schedule(device_t self, struct sunxi_drm_task *task)
{
	struct sunxi_drm_softc *sc = device_private(self);

	if (atomic_load_relaxed(&sc->sc_task_thread) == curlwp)
		SIMPLEQ_INSERT_TAIL(&sc->sc_tasks, task, sdt_u.queue);
	else
		workqueue_enqueue(sc->sc_task_wq, &task->sdt_u.work, NULL);
}
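
/*
 * Usage sketch (illustrative only, not part of the driver): a component
 * that wants work run by this driver could use the task API above roughly
 * as follows, where example_task, example_task_fn, and sunxi_drm_dev (the
 * sunxi_drm device_t) are hypothetical names:
 *
 *	static struct sunxi_drm_task example_task;
 *
 *	static void
 *	example_task_fn(struct sunxi_drm_task *task)
 *	{
 *		... deferred work, e.g. display detection ...
 *	}
 *
 *	sunxi_task_init(&example_task, example_task_fn);
 *	sunxi_task_schedule(sunxi_drm_dev, &example_task);
 *
 * Tasks scheduled while sunxi_drm_init() is still running on the attach
 * thread are queued and executed at the end of that function; tasks
 * scheduled later are handed to the driver's workqueue.  The task
 * structure must remain valid until its callback has run.
 */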