/* $NetBSD: rk_drm.c,v 1.12 2021/12/19 12:28:27 riastradh Exp $ */

/*-
 * Copyright (c) 2019 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rk_drm.c,v 1.12 2021/12/19 12:28:27 riastradh Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <uvm/uvm_device.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

#include <dev/fdt/fdt_port.h>
#include <dev/fdt/fdtvar.h>

#include <arm/rockchip/rk_drm.h>

#include <drm/drm_auth.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

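/*
 * Upper bounds for the KMS mode configuration; also used below to size
 * the CMA reservation so a full 4K 32bpp plane fits alongside the
 * framebuffer console.
 */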
#define	RK_DRM_MAX_WIDTH	3840
#define	RK_DRM_MAX_HEIGHT	2160

static TAILQ_HEAD(, rk_drm_ports) rk_drm_ports =
    TAILQ_HEAD_INITIALIZER(rk_drm_ports);

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "rockchip,display-subsystem" },
	DEVICE_COMPAT_EOL
};

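/*
 * Firmware-provided framebuffer nodes removed from the device tree once
 * this driver takes ownership of the display hardware.
 */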
static const char * fb_compatible[] = {
	"simple-framebuffer",
	NULL
};

static int	rk_drm_match(device_t, cfdata_t, void *);
static void	rk_drm_attach(device_t, device_t, void *);

static void	rk_drm_init(device_t);
static vmem_t	*rk_drm_alloc_cma_pool(struct drm_device *, size_t);

static uint32_t	rk_drm_get_vblank_counter(struct drm_device *, unsigned int);
static int	rk_drm_enable_vblank(struct drm_device *, unsigned int);
static void	rk_drm_disable_vblank(struct drm_device *, unsigned int);

static int	rk_drm_load(struct drm_device *, unsigned long);
static void	rk_drm_unload(struct drm_device *);

static void	rk_drm_task_work(struct work *, void *);

static struct drm_driver rk_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM,
	.dev_priv_size = 0,
	.load = rk_drm_load,
	.unload = rk_drm_unload,

	.gem_free_object = drm_gem_cma_free_object,
	.mmap_object = drm_gem_or_legacy_mmap_object,
	.gem_uvm_ops = &drm_gem_cma_uvm_ops,

	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_destroy = drm_gem_dumb_destroy,

	.get_vblank_counter = rk_drm_get_vblank_counter,
	.enable_vblank = rk_drm_enable_vblank,
	.disable_vblank = rk_drm_disable_vblank,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

CFATTACH_DECL_NEW(rk_drm, sizeof(struct rk_drm_softc),
	rk_drm_match, rk_drm_attach, NULL, NULL);

static int
rk_drm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_compatible_match(faa->faa_phandle, compat_data);
}

static void
rk_drm_attach(device_t parent, device_t self, void *aux)
{
	struct rk_drm_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	struct drm_driver * const driver = &rk_drm_driver;
	prop_dictionary_t dict = device_properties(self);
	bool is_disabled;

	sc->sc_dev = self;
	sc->sc_dmat = faa->faa_dmat;
	sc->sc_bst = faa->faa_bst;
	sc->sc_phandle = faa->faa_phandle;
	sc->sc_task_thread = NULL;
	SIMPLEQ_INIT(&sc->sc_tasks);
	if (workqueue_create(&sc->sc_task_wq, "rkdrm",
	    &rk_drm_task_work, NULL, PRI_NONE, IPL_NONE, WQ_MPSAFE)) {
		aprint_error_dev(self, "unable to create workqueue\n");
		sc->sc_task_wq = NULL;
		return;
	}

	aprint_naive("\n");

	if (prop_dictionary_get_bool(dict, "disabled", &is_disabled) && is_disabled) {
		aprint_normal(": (disabled)\n");
		return;
	}

	aprint_normal("\n");

	sc->sc_ddev = drm_dev_alloc(driver, sc->sc_dev);
	if (IS_ERR(sc->sc_ddev)) {
		aprint_error_dev(self, "couldn't allocate DRM device\n");
		return;
	}
	sc->sc_ddev->dev_private = sc;
	sc->sc_ddev->bst = sc->sc_bst;
	sc->sc_ddev->bus_dmat = sc->sc_dmat;
	sc->sc_ddev->dmat = sc->sc_ddev->bus_dmat;
	sc->sc_ddev->dmat_subregion_p = false;

	fdt_remove_bycompat(fb_compatible);

	config_defer(self, rk_drm_init);
}

static void
rk_drm_init(device_t dev)
{
	struct rk_drm_softc * const sc = device_private(dev);
	struct drm_driver * const driver = &rk_drm_driver;
	int error;

	/*
	 * Cause any tasks issued synchronously during attach to be
	 * processed at the end of this function.
	 */
	sc->sc_task_thread = curlwp;

	error = -drm_dev_register(sc->sc_ddev, 0);
	if (error) {
		aprint_error_dev(dev, "couldn't register DRM device: %d\n",
		    error);
		goto out;
	}
	sc->sc_dev_registered = true;

	aprint_normal_dev(dev, "initialized %s %d.%d.%d %s on minor %d\n",
	    driver->name, driver->major, driver->minor, driver->patchlevel,
	    driver->date, sc->sc_ddev->primary->index);

	/*
	 * Process asynchronous tasks queued synchronously during
	 * attach.  This will be for display detection to attach a
	 * framebuffer, so we have the opportunity for a console device
	 * to attach before autoconf has completed, in time for init(8)
	 * to find that console without panicking.
	 */
	while (!SIMPLEQ_EMPTY(&sc->sc_tasks)) {
		struct rk_drm_task *const task = SIMPLEQ_FIRST(&sc->sc_tasks);

		SIMPLEQ_REMOVE_HEAD(&sc->sc_tasks, rdt_u.queue);
		(*task->rdt_fn)(task);
	}

out:	/* Cause any subsequent tasks to be processed by the workqueue. */
	atomic_store_relaxed(&sc->sc_task_thread, NULL);
}

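/*
 * Allocate a physically contiguous chunk of DMA memory and wrap its bus
 * address range in a vmem arena; GEM CMA objects (including the console
 * framebuffer) are subsequently carved out of this arena.
 */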
static vmem_t *
rk_drm_alloc_cma_pool(struct drm_device *ddev, size_t cma_size)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);
	bus_dma_segment_t segs[1];
	int nsegs;
	int error;

	error = bus_dmamem_alloc(sc->sc_dmat, cma_size, PAGE_SIZE, 0,
	    segs, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "couldn't allocate CMA pool\n");
		return NULL;
	}

	return vmem_create("rkdrm", segs[0].ds_addr, segs[0].ds_len,
	    PAGE_SIZE, NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
}

static int
rk_drm_fb_create_handle(struct drm_framebuffer *fb,
    struct drm_file *file, unsigned int *handle)
{
	struct rk_drm_framebuffer *sfb = to_rk_drm_framebuffer(fb);

	return drm_gem_handle_create(file, &sfb->obj->base, handle);
}

static void
rk_drm_fb_destroy(struct drm_framebuffer *fb)
{
	struct rk_drm_framebuffer *sfb = to_rk_drm_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	drm_gem_object_put_unlocked(&sfb->obj->base);
	kmem_free(sfb, sizeof(*sfb));
}

static const struct drm_framebuffer_funcs rk_drm_framebuffer_funcs = {
	.create_handle = rk_drm_fb_create_handle,
	.destroy = rk_drm_fb_destroy,
};

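/*
 * Wrap a user-supplied GEM CMA object in a drm_framebuffer for the
 * ADDFB2 path.  Only plain (flag-less) requests are accepted.
 */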
static struct drm_framebuffer *
rk_drm_fb_create(struct drm_device *ddev, struct drm_file *file,
    const struct drm_mode_fb_cmd2 *cmd)
{
	struct rk_drm_framebuffer *fb;
	struct drm_gem_object *gem_obj;
	int error;

	if (cmd->flags)
		return NULL;

	gem_obj = drm_gem_object_lookup(file, cmd->handles[0]);
	if (gem_obj == NULL)
		return NULL;

	fb = kmem_zalloc(sizeof(*fb), KM_SLEEP);
	fb->obj = to_drm_gem_cma_obj(gem_obj);
	fb->base.pitches[0] = cmd->pitches[0];
	fb->base.pitches[1] = cmd->pitches[1];
	fb->base.pitches[2] = cmd->pitches[2];
	fb->base.offsets[0] = cmd->offsets[0];
	fb->base.offsets[1] = cmd->offsets[1];
	fb->base.offsets[2] = cmd->offsets[2];
	fb->base.width = cmd->width;
	fb->base.height = cmd->height;
	fb->base.format = drm_format_info(cmd->pixel_format);

	error = drm_framebuffer_init(ddev, &fb->base, &rk_drm_framebuffer_funcs);
	if (error != 0)
		goto dealloc;

	return &fb->base;

dealloc:
	drm_framebuffer_cleanup(&fb->base);
	kmem_free(fb, sizeof(*fb));
	drm_gem_object_put_unlocked(gem_obj);

	return NULL;
}

static struct drm_mode_config_funcs rk_drm_mode_config_funcs = {
	.fb_create = rk_drm_fb_create,
};

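/*
 * fb_probe callback for the fbdev helper: reserve the CMA pool, allocate
 * a scanout buffer for the console, and attach the framebuffer console
 * device on the "rkfbbus" interface attribute.
 */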
static int
rk_drm_fb_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes)
{
	struct rk_drm_softc * const sc = rk_drm_private(helper->dev);
	struct drm_device *ddev = helper->dev;
	struct rk_drm_framebuffer *sfb = to_rk_drm_framebuffer(helper->fb);
	struct drm_framebuffer *fb = helper->fb;
	struct rk_drmfb_attach_args sfa;
	size_t cma_size;
	int error;

	const u_int width = sizes->surface_width;
	const u_int height = sizes->surface_height;
	const u_int pitch = width * (32 / 8);

	const size_t size = roundup(height * pitch, PAGE_SIZE);

	/* Reserve enough memory for the FB console plus a 4K plane, rounded to 1MB */
	cma_size = size;
	cma_size += (RK_DRM_MAX_WIDTH * RK_DRM_MAX_HEIGHT * 4);
	cma_size = roundup(cma_size, 1024 * 1024);
	sc->sc_ddev->cma_pool = rk_drm_alloc_cma_pool(sc->sc_ddev, cma_size);
	if (sc->sc_ddev->cma_pool != NULL)
		aprint_normal_dev(sc->sc_dev, "reserved %u MB DRAM for CMA\n",
		    (u_int)(cma_size / (1024 * 1024)));

	sfb->obj = drm_gem_cma_create(ddev, size);
	if (sfb->obj == NULL) {
		DRM_ERROR("failed to allocate memory for framebuffer\n");
		return -ENOMEM;
	}

	fb->pitches[0] = pitch;
	fb->offsets[0] = 0;
	fb->width = width;
	fb->height = height;
#ifdef __ARM_BIG_ENDIAN
	fb->format = drm_format_info(DRM_FORMAT_BGRX8888);
#else
	fb->format = drm_format_info(DRM_FORMAT_XRGB8888);
#endif

	error = drm_framebuffer_init(ddev, fb, &rk_drm_framebuffer_funcs);
	if (error != 0) {
		DRM_ERROR("failed to initialize framebuffer\n");
		return error;
	}

	memset(&sfa, 0, sizeof(sfa));
	sfa.sfa_drm_dev = ddev;
	sfa.sfa_fb_helper = helper;
	sfa.sfa_fb_sizes = *sizes;
	sfa.sfa_fb_bst = sc->sc_bst;
	sfa.sfa_fb_dmat = sc->sc_dmat;
	sfa.sfa_fb_linebytes = helper->fb->pitches[0];

	helper->fbdev = config_found(ddev->dev, &sfa, NULL,
	    CFARGS(.iattr = "rkfbbus"));
	if (helper->fbdev == NULL) {
		DRM_ERROR("unable to attach framebuffer\n");
		return -ENXIO;
	}

	return 0;
}

static struct drm_fb_helper_funcs rk_drm_fb_helper_funcs = {
	.fb_probe = rk_drm_fb_probe,
};

static int
rk_drm_load(struct drm_device *ddev, unsigned long flags)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);
	struct rk_drm_ports *sport;
	struct rk_drm_fbdev *fbdev;
	struct fdt_endpoint *ep;
	const u_int *data;
	int datalen, error, num_crtc, ep_index;

	drm_mode_config_init(ddev);
	ddev->mode_config.min_width = 0;
	ddev->mode_config.min_height = 0;
	ddev->mode_config.max_width = RK_DRM_MAX_WIDTH;
	ddev->mode_config.max_height = RK_DRM_MAX_HEIGHT;
	ddev->mode_config.funcs = &rk_drm_mode_config_funcs;

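	/*
	 * Walk the "ports" phandle list and claim each display interface
	 * that registered itself via rk_drm_register_port, activating its
	 * downstream endpoints.  Each claimed port provides one CRTC.
	 */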
	num_crtc = 0;
	data = fdtbus_get_prop(sc->sc_phandle, "ports", &datalen);
	while (datalen >= 4) {
		const int crtc_phandle = fdtbus_get_phandle_from_native(be32dec(data));

		TAILQ_FOREACH(sport, &rk_drm_ports, entries)
			if (sport->phandle == crtc_phandle && sport->ddev == NULL) {
				sport->ddev = ddev;
				for (ep_index = 0; (ep = fdt_endpoint_get_from_index(sport->port, 0, ep_index)) != NULL; ep_index++) {
					error = fdt_endpoint_activate_direct(ep, true);
					if (error != 0)
						aprint_debug_dev(sc->sc_dev,
						    "failed to activate endpoint %d: %d\n",
						    ep_index, error);
				}
				num_crtc++;
			}

		datalen -= 4;
		data++;
	}

	if (num_crtc == 0) {
		aprint_error_dev(sc->sc_dev, "no display interface ports configured\n");
		error = ENXIO;
		goto drmerr;
	}

	fbdev = kmem_zalloc(sizeof(*fbdev), KM_SLEEP);

	drm_fb_helper_prepare(ddev, &fbdev->helper, &rk_drm_fb_helper_funcs);

	error = drm_fb_helper_init(ddev, &fbdev->helper, num_crtc);
	if (error)
		goto allocerr;

	fbdev->helper.fb = kmem_zalloc(sizeof(struct rk_drm_framebuffer), KM_SLEEP);

	drm_fb_helper_single_add_all_connectors(&fbdev->helper);

	drm_helper_disable_unused_functions(ddev);

	drm_fb_helper_initial_config(&fbdev->helper, 32);

	/* XXX */
	ddev->irq_enabled = true;
	drm_vblank_init(ddev, num_crtc);

	return 0;

allocerr:
	kmem_free(fbdev, sizeof(*fbdev));
drmerr:
	drm_mode_config_cleanup(ddev);

	return error;
}

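/*
 * Vblank hooks: the DRM core calls these with a CRTC index; they forward
 * to the per-CRTC callbacks recorded in sc_vbl[] and fall back to a no-op
 * when no backend has filled in that slot.
 */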
static uint32_t
rk_drm_get_vblank_counter(struct drm_device *ddev, unsigned int crtc)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].get_vblank_counter == NULL)
		return 0;

	return sc->sc_vbl[crtc].get_vblank_counter(sc->sc_vbl[crtc].priv);
}

static int
rk_drm_enable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].enable_vblank == NULL)
		return 0;

	sc->sc_vbl[crtc].enable_vblank(sc->sc_vbl[crtc].priv);

	return 0;
}

static void
rk_drm_disable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return;

	if (sc->sc_vbl[crtc].disable_vblank == NULL)
		return;

	sc->sc_vbl[crtc].disable_vblank(sc->sc_vbl[crtc].priv);
}

static void
rk_drm_unload(struct drm_device *ddev)
{
	drm_mode_config_cleanup(ddev);
}

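/*
 * Called by display controller drivers at attach time to register their
 * OF port with the display subsystem.  rk_drm_load later matches these
 * entries against the "ports" property and binds them to the DRM device,
 * which rk_drm_port_device then reports back to the caller.
 */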
int
rk_drm_register_port(int phandle, struct fdt_device_ports *port)
{
	struct rk_drm_ports *sport;

	sport = kmem_zalloc(sizeof(*sport), KM_SLEEP);
	sport->phandle = phandle;
	sport->port = port;
	sport->ddev = NULL;
	TAILQ_INSERT_TAIL(&rk_drm_ports, sport, entries);

	return 0;
}

struct drm_device *
rk_drm_port_device(struct fdt_device_ports *port)
{
	struct rk_drm_ports *sport;

	TAILQ_FOREACH(sport, &rk_drm_ports, entries)
		if (sport->port == port)
			return sport->ddev;

	return NULL;
}

static void
rk_drm_task_work(struct work *work, void *cookie)
{
	struct rk_drm_task *task = container_of(work, struct rk_drm_task,
	    rdt_u.work);

	(*task->rdt_fn)(task);
}

void
rk_task_init(struct rk_drm_task *task,
    void (*fn)(struct rk_drm_task *))
{

	task->rdt_fn = fn;
}

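/*
 * Schedule a task: while rk_drm_init is still running on the attach
 * thread, the task is queued on sc_tasks and run at the end of init;
 * afterwards it is handed to the workqueue.  A hypothetical caller
 * (sketch only; "mytask" and "my_callback" are not part of this driver)
 * might do:
 *
 *	static struct rk_drm_task mytask;
 *
 *	rk_task_init(&mytask, my_callback);
 *	rk_task_schedule(rkdrm_device, &mytask);
 */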
void
rk_task_schedule(device_t self, struct rk_drm_task *task)
{
	struct rk_drm_softc *sc = device_private(self);

	if (atomic_load_relaxed(&sc->sc_task_thread) == curlwp)
		SIMPLEQ_INSERT_TAIL(&sc->sc_tasks, task, rdt_u.queue);
	else
		workqueue_enqueue(sc->sc_task_wq, &task->rdt_u.work, NULL);
}