      1 /*	$NetBSD: viogpu.c,v 1.2 2025/10/04 03:58:38 thorpej Exp $ */
      2 /*	$OpenBSD: viogpu.c,v 1.3 2023/05/29 08:13:35 sf Exp $ */
      3 
      4 /*
      5  * Copyright (c) 2024-2025 The NetBSD Foundation, Inc.
      6  * All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  *
     17  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     19  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     20  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     27  * POSSIBILITY OF SUCH DAMAGE.
     28  */
     29 
     30 /*
     31  * Copyright (c) 2021-2023 joshua stein <jcs (at) openbsd.org>
     32  *
     33  * Permission to use, copy, modify, and distribute this software for any
     34  * purpose with or without fee is hereby granted, provided that the above
     35  * copyright notice and this permission notice appear in all copies.
     36  *
     37  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     38  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     39  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     40  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     41  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     42  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     43  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     44  */
     45 
     46 #include <sys/cdefs.h>
     47 
     48 #include <sys/param.h>
     49 #include <sys/systm.h>
     50 #include <sys/bus.h>
     51 #include <sys/condvar.h>
     52 #include <sys/device.h>
     53 #include <sys/intr.h>
     54 #include <sys/kernel.h>
     55 #include <sys/mutex.h>
     56 
     57 #include <dev/pci/virtioreg.h>
     58 #include <dev/pci/virtiovar.h>
     59 #include <dev/pci/viogpu.h>
     60 
     61 #include <dev/rasops/rasops.h>
     62 
     63 #include <dev/wscons/wsconsio.h>
     64 #include <dev/wscons/wsdisplayvar.h>
     65 #include <dev/wscons/wsdisplay_vconsvar.h>
     66 
     67 #include <prop/proplib.h>
     68 
struct viogpu_softc;

/* Autoconfiguration glue. */
static int	viogpu_match(device_t, cfdata_t, void *);
static void	viogpu_attach(device_t, device_t, void *);
static void	viogpu_attach_postintr(device_t);
/* Command submission: synchronous (blocking) and asynchronous paths. */
static int	viogpu_cmd_sync(struct viogpu_softc *, void *, size_t, void *,
				size_t);
static int	viogpu_cmd_req(struct viogpu_softc *, void *, size_t, size_t);
static void	viogpu_screen_update(void *);
static int	viogpu_vq_done(struct virtqueue *vq);

/* virtio-gpu 2D control commands. */
static int	viogpu_get_display_info(struct viogpu_softc *);
static int	viogpu_create_2d(struct viogpu_softc *, uint32_t, uint32_t,
				 uint32_t);
static int	viogpu_set_scanout(struct viogpu_softc *, uint32_t, uint32_t,
				   uint32_t, uint32_t);
static int	viogpu_attach_backing(struct viogpu_softc *, uint32_t,
				      bus_dmamap_t);
static int	viogpu_transfer_to_host_2d(struct viogpu_softc *sc, uint32_t,
					   uint32_t, uint32_t, uint32_t,
					   uint32_t);
static int	viogpu_flush_resource(struct viogpu_softc *, uint32_t,
				      uint32_t, uint32_t, uint32_t, uint32_t);

/* wsdisplay interface. */
static int	viogpu_wsioctl(void *, void *, u_long, void *, int,
			       struct lwp *);

static void 	viogpu_init_screen(void *, struct vcons_screen *, int, long *);

/* Interposed rasops text operations (each schedules a screen update). */
static void	viogpu_cursor(void *, int, int, int);
static void	viogpu_putchar(void *, int, int, u_int, long);
static void	viogpu_copycols(void *, int, int, int, int);
static void	viogpu_erasecols(void *, int, int, int, long);
static void	viogpu_copyrows(void *, int, int, int);
static void	viogpu_eraserows(void *, int, int, long);
static void	viogpu_replaceattr(void *, long, long);
    105 
/*
 * RESOURCE_ATTACH_BACKING request with its backing-entry array appended.
 * Only a single entry is ever sent (see viogpu_attach_backing()).
 */
struct virtio_gpu_resource_attach_backing_entries {
	struct virtio_gpu_ctrl_hdr hdr;
	__le32 resource_id;
	__le32 nr_entries;
	struct virtio_gpu_mem_entry entries[1];
} __packed;

/*
 * Size of the shared command/response DMA buffer: the largest request
 * we ever issue plus room for a control-header-sized response after it.
 */
#define VIOGPU_CMD_DMA_SIZE \
    MAX(sizeof(struct virtio_gpu_resp_display_info), \
    MAX(sizeof(struct virtio_gpu_resource_create_2d), \
    MAX(sizeof(struct virtio_gpu_set_scanout), \
    MAX(sizeof(struct virtio_gpu_resource_attach_backing_entries), \
    MAX(sizeof(struct virtio_gpu_transfer_to_host_2d), \
    sizeof(struct virtio_gpu_resource_flush)))))) + \
    sizeof(struct virtio_gpu_ctrl_hdr)
    121 
struct viogpu_softc {
	device_t		sc_dev;
	struct virtio_softc	*sc_virtio;	/* parent virtio device */
#define	VQCTRL	0
#define	VQCURS	1
	struct virtqueue	sc_vqs[2];	/* control and cursor queues */

	/* Shared DMA buffer holding the one in-flight command + response. */
	bus_dma_segment_t	sc_dma_seg;
	bus_dmamap_t		sc_dma_map;
	void			*sc_cmd;	/* KVA of the command buffer */
	int			sc_fence_id;	/* last fence id issued */

	/* Framebuffer backing store shared with the host. */
	int			sc_fb_height;
	int			sc_fb_width;
	bus_dma_segment_t	sc_fb_dma_seg;
	bus_dmamap_t		sc_fb_dma_map;
	size_t			sc_fb_dma_size;	/* width * height * 4 */
	void			*sc_fb_dma_kva;

	/* wscons / vcons glue (single static screen). */
	struct wsscreen_descr		sc_wsd;
	const struct wsscreen_descr	*sc_scrlist[1];
	struct wsscreen_list		sc_wsl;
	struct vcons_data		sc_vd;
	struct vcons_screen		sc_vcs;
	bool				is_console;

	/* Original rasops text ops, saved before interposing wrappers. */
	void	(*ri_cursor)(void *, int, int, int);
	void	(*ri_putchar)(void *, int, int, u_int, long);
	void	(*ri_copycols)(void *, int, int, int, int);
	void	(*ri_erasecols)(void *, int, int, int, long);
	void	(*ri_copyrows)(void *, int, int, int);
	void	(*ri_eraserows)(void *, int, int, long);
	void	(*ri_replaceattr)(void *, long, long);

	/*
	 * sc_mutex protects is_requesting, needs_update, and req_wait. It is
	 * also held while submitting and reading the return values of
	 * asynchronous commands and for the full duration of synchronous
	 * commands.
	 */
	kmutex_t		sc_mutex;
	bool			is_requesting;	/* a command is in flight */
	bool			needs_update;	/* flush again when done */
	kcondvar_t		req_wait;	/* wakes sync waiters */
	void			*update_soft_ih; /* screen-update softint */
	size_t			cur_cmd_size;	/* layout of current cmd */
	size_t			cur_ret_size;	/* ... and its response */
};
    170 
CFATTACH_DECL_NEW(viogpu, sizeof(struct viogpu_softc),
		  viogpu_match, viogpu_attach, NULL, NULL);

/* VIRGL/EDID feature bits are only negotiated for debugging builds. */
#if VIOGPU_DEBUG
#define VIOGPU_FEATURES		(VIRTIO_GPU_F_VIRGL | VIRTIO_GPU_F_EDID)
#else
#define VIOGPU_FEATURES		0
#endif

static struct wsdisplay_accessops viogpu_accessops = {
	.ioctl        = viogpu_wsioctl,
	.mmap         = NULL, /* This would require signalling on write to
	                       * update the screen. */
	.alloc_screen = NULL,
	.free_screen  = NULL,
	.show_screen  = NULL,
	.load_font    = NULL,
	.pollc        = NULL,
	.scroll       = NULL,
};
    191 
    192 static int
    193 viogpu_match(device_t parent, cfdata_t match, void *aux)
    194 {
    195 	struct virtio_attach_args *va = aux;
    196 
    197 	if (va->sc_childdevid == VIRTIO_DEVICE_ID_GPU)
    198 		return 1;
    199 
    200 	return 0;
    201 }
    202 
    203 static void
    204 viogpu_attach(device_t parent, device_t self, void *aux)
    205 {
    206 	struct viogpu_softc *sc = device_private(self);
    207 	struct virtio_softc *vsc = device_private(parent);
    208 	int error;
    209 
    210 	if (virtio_child(vsc) != NULL) {
    211 		aprint_error("child already attached for %s\n",
    212 		    device_xname(parent));
    213 		return;
    214 	}
    215 
    216 	sc->sc_dev = self;
    217 	sc->sc_virtio = vsc;
    218 
    219 	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_NONE);
    220 	cv_init(&sc->req_wait, "vgpu_req");
    221 	sc->update_soft_ih = softint_establish(SOFTINT_NET,
    222 	    viogpu_screen_update, sc);
    223 	sc->needs_update = false;
    224 	sc->is_requesting = false;
    225 	sc->sc_fence_id = 0;
    226 
    227 	virtio_child_attach_start(vsc, self, IPL_VM,
    228 	    VIOGPU_FEATURES, VIRTIO_COMMON_FLAG_BITS);
    229 
    230 	if (!virtio_version_1(vsc)) {
    231 		aprint_error_dev(sc->sc_dev, "requires virtio version 1\n");
    232 		goto err;
    233 	}
    234 
    235 	/* Allocate command and cursor virtqueues. */
    236 	virtio_init_vq_vqdone(vsc, &sc->sc_vqs[VQCTRL], 0, viogpu_vq_done);
    237 	error = virtio_alloc_vq(vsc, &sc->sc_vqs[VQCTRL], NBPG, 1, "control");
    238 	if (error != 0) {
    239 		aprint_error_dev(sc->sc_dev, "alloc_vq failed: %d\n", error);
    240 		goto err;
    241 	}
    242 
    243 	virtio_init_vq_vqdone(vsc, &sc->sc_vqs[VQCURS], 1, viogpu_vq_done);
    244 	error = virtio_alloc_vq(vsc, &sc->sc_vqs[VQCURS], NBPG, 1, "cursor");
    245 	if (error != 0) {
    246 		aprint_error_dev(sc->sc_dev, "alloc_vq failed: %d\n", error);
    247 		goto free_vq0;
    248 	}
    249 
    250 	if (virtio_child_attach_finish(vsc, sc->sc_vqs,
    251 	    __arraycount(sc->sc_vqs), NULL,
    252 	    VIRTIO_F_INTR_MPSAFE | VIRTIO_F_INTR_SOFTINT) != 0)
    253 		goto free_vqs;
    254 
    255 	/* Interrupts are required for synchronous commands in attachment. */
    256 	config_interrupts(self, viogpu_attach_postintr);
    257 
    258 	return;
    259 
    260 free_vqs:
    261 	virtio_free_vq(vsc, &sc->sc_vqs[VQCURS]);
    262 free_vq0:
    263 	virtio_free_vq(vsc, &sc->sc_vqs[VQCTRL]);
    264 err:
    265 	virtio_child_attach_failed(vsc);
    266 	cv_destroy(&sc->req_wait);
    267 	mutex_destroy(&sc->sc_mutex);
    268 	return;
    269 }
    270 
    271 static void
    272 viogpu_attach_postintr(device_t self)
    273 {
    274 	struct viogpu_softc *sc = device_private(self);
    275 	struct virtio_softc *vsc = sc->sc_virtio;
    276 	struct wsemuldisplaydev_attach_args waa;
    277 	struct rasops_info *ri;
    278 	long defattr;
    279 	int nsegs;
    280 	int error;
    281 
    282 	/* Set up DMA space for sending commands. */
    283 	error = bus_dmamap_create(virtio_dmat(vsc), VIOGPU_CMD_DMA_SIZE, 1,
    284 	    VIOGPU_CMD_DMA_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
    285 	    &sc->sc_dma_map);
    286 	if (error != 0) {
    287 		aprint_error_dev(sc->sc_dev, "bus_dmamap_create failed: %d\n",
    288 		    error);
    289 		goto err;
    290 	}
    291 	error = bus_dmamem_alloc(virtio_dmat(vsc), VIOGPU_CMD_DMA_SIZE, 16, 0,
    292 	    &sc->sc_dma_seg, 1, &nsegs, BUS_DMA_NOWAIT);
    293 	if (error != 0) {
    294 		aprint_error_dev(sc->sc_dev, "bus_dmamem_alloc failed: %d\n",
    295 		    error);
    296 		goto destroy;
    297 	}
    298 	error = bus_dmamem_map(virtio_dmat(vsc), &sc->sc_dma_seg, nsegs,
    299 	    VIOGPU_CMD_DMA_SIZE, &sc->sc_cmd, BUS_DMA_NOWAIT);
    300 	if (error != 0) {
    301 		aprint_error_dev(sc->sc_dev, "bus_dmamem_map failed: %d\n",
    302 		    error);
    303 		goto free;
    304 	}
    305 	memset(sc->sc_cmd, 0, VIOGPU_CMD_DMA_SIZE);
    306 	error = bus_dmamap_load(virtio_dmat(vsc), sc->sc_dma_map, sc->sc_cmd,
    307 	    VIOGPU_CMD_DMA_SIZE, NULL, BUS_DMA_NOWAIT);
    308 	if (error != 0) {
    309 		aprint_error_dev(sc->sc_dev, "bus_dmamap_load failed: %d\n",
    310 		    error);
    311 		goto unmap;
    312 	}
    313 
    314 	if (viogpu_get_display_info(sc) != 0)
    315 		goto unmap;
    316 
    317 	/* Set up DMA space for actual framebuffer. */
    318 	sc->sc_fb_dma_size = sc->sc_fb_width * sc->sc_fb_height * 4;
    319 	error = bus_dmamap_create(virtio_dmat(vsc), sc->sc_fb_dma_size, 1,
    320 	    sc->sc_fb_dma_size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
    321 	    &sc->sc_fb_dma_map);
    322 	if (error != 0) {
    323 		aprint_error_dev(sc->sc_dev, "bus_dmamap_create failed: %d\n",
    324 		    error);
    325 		goto unmap;
    326 	}
    327 	error = bus_dmamem_alloc(virtio_dmat(vsc), sc->sc_fb_dma_size, 1024, 0,
    328 	    &sc->sc_fb_dma_seg, 1, &nsegs, BUS_DMA_NOWAIT);
    329 	if (error != 0) {
    330 		aprint_error_dev(sc->sc_dev, "bus_dmamem_alloc failed: %d\n",
    331 		    error);
    332 		goto fb_destroy;
    333 	}
    334 	error = bus_dmamem_map(virtio_dmat(vsc), &sc->sc_fb_dma_seg, nsegs,
    335 	    sc->sc_fb_dma_size, &sc->sc_fb_dma_kva, BUS_DMA_NOWAIT);
    336 	if (error != 0) {
    337 		aprint_error_dev(sc->sc_dev, "bus_dmamem_map failed: %d\n",
    338 		    error);
    339 		goto fb_free;
    340 	}
    341 	memset(sc->sc_fb_dma_kva, 0, sc->sc_fb_dma_size);
    342 	error = bus_dmamap_load(virtio_dmat(vsc), sc->sc_fb_dma_map,
    343 	    sc->sc_fb_dma_kva, sc->sc_fb_dma_size, NULL, BUS_DMA_NOWAIT);
    344 	if (error != 0) {
    345 		aprint_error_dev(sc->sc_dev, "bus_dmamap_load failed: %d\n",
    346 		    error);
    347 		goto fb_unmap;
    348 	}
    349 
    350 	if (viogpu_create_2d(sc, 1, sc->sc_fb_width, sc->sc_fb_height) != 0)
    351 		goto fb_unmap;
    352 
    353 	if (viogpu_attach_backing(sc, 1, sc->sc_fb_dma_map) != 0)
    354 		goto fb_unmap;
    355 
    356 	if (viogpu_set_scanout(sc, 0, 1, sc->sc_fb_width,
    357 	    sc->sc_fb_height) != 0)
    358 		goto fb_unmap;
    359 
    360 #ifdef WSDISPLAY_MULTICONS
    361 	sc->is_console = true;
    362 #else
    363 	sc->is_console = device_getprop_bool(self, "is_console");
    364 #endif
    365 
    366 	sc->sc_wsd = (struct wsscreen_descr){
    367 		"std",
    368 		0, 0,
    369 		NULL,
    370 		8, 16,
    371 		WSSCREEN_WSCOLORS | WSSCREEN_HILIT,
    372 		NULL
    373 	};
    374 
    375 	sc->sc_scrlist[0] = &sc->sc_wsd;
    376 	sc->sc_wsl.nscreens = __arraycount(sc->sc_scrlist);
    377 	sc->sc_wsl.screens = sc->sc_scrlist;
    378 
    379 	vcons_init(&sc->sc_vd, sc, &sc->sc_wsd, &viogpu_accessops);
    380 	sc->sc_vd.init_screen = viogpu_init_screen;
    381 
    382 	vcons_init_screen(&sc->sc_vd, &sc->sc_vcs, 1, &defattr);
    383 	sc->sc_vcs.scr_flags |= VCONS_SCREEN_IS_STATIC;
    384 	ri = &sc->sc_vcs.scr_ri;
    385 
    386 	sc->sc_wsd.textops = &ri->ri_ops;
    387 	sc->sc_wsd.capabilities = ri->ri_caps;
    388 	sc->sc_wsd.nrows = ri->ri_rows;
    389 	sc->sc_wsd.ncols = ri->ri_cols;
    390 
    391 	if (sc->is_console) {
    392 		wsdisplay_cnattach(&sc->sc_wsd, ri, 0, 0, defattr);
    393 		vcons_replay_msgbuf(&sc->sc_vcs);
    394 	}
    395 
    396 	device_printf(sc->sc_dev, "%dx%d, %dbpp\n", ri->ri_width,
    397 	    ri->ri_height, ri->ri_depth);
    398 
    399 	waa.scrdata = &sc->sc_wsl;
    400 	waa.accessops = &viogpu_accessops;
    401 	waa.accesscookie = &sc->sc_vd;
    402 	waa.console = sc->is_console;
    403 
    404 	config_found(self, &waa, wsemuldisplaydevprint, CFARGS_NONE);
    405 
    406 	return;
    407 
    408 fb_unmap:
    409 	bus_dmamem_unmap(virtio_dmat(vsc), &sc->sc_fb_dma_kva,
    410 	    sc->sc_fb_dma_size);
    411 fb_free:
    412 	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_fb_dma_seg, 1);
    413 fb_destroy:
    414 	bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_fb_dma_map);
    415 unmap:
    416 	bus_dmamem_unmap(virtio_dmat(vsc), &sc->sc_cmd, VIOGPU_CMD_DMA_SIZE);
    417 free:
    418 	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_dma_seg, 1);
    419 destroy:
    420 	bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_dma_map);
    421 err:
    422 	aprint_error_dev(sc->sc_dev, "DMA setup failed\n");
    423 	virtio_free_vq(vsc, &sc->sc_vqs[VQCURS]);
    424 	virtio_free_vq(vsc, &sc->sc_vqs[VQCTRL]);
    425 	virtio_child_attach_failed(vsc);
    426 	cv_destroy(&sc->req_wait);
    427 	mutex_destroy(&sc->sc_mutex);
    428 	return;
    429 }
    430 
    431 /*
    432  * This carries out a command synchronously, unlike the commands used to
    433  * update the screen.
    434  */
    435 static int
    436 viogpu_cmd_sync(struct viogpu_softc *sc, void *cmd, size_t cmd_size,
    437 		void *ret, size_t ret_size)
    438 {
    439 	int error;
    440 
    441 	mutex_enter(&sc->sc_mutex);
    442 
    443 	while (sc->is_requesting == true)
    444 		cv_wait(&sc->req_wait, &sc->sc_mutex);
    445 
    446 	error = viogpu_cmd_req(sc, cmd, cmd_size, ret_size);
    447 	if (error != 0)
    448 		goto out;
    449 
    450 	while (sc->is_requesting == true)
    451 		cv_wait(&sc->req_wait, &sc->sc_mutex);
    452 
    453 	if (ret != NULL)
    454 		memcpy(ret, (char *)sc->sc_cmd + cmd_size, ret_size);
    455 
    456 out:
    457 	mutex_exit(&sc->sc_mutex);
    458 
    459 	return error;
    460 }
    461 
    462 static void
    463 viogpu_screen_update(void *arg)
    464 {
    465 	struct viogpu_softc *sc = arg;
    466 
    467 	mutex_enter(&sc->sc_mutex);
    468 
    469 	if (sc->is_requesting == false)
    470 		viogpu_transfer_to_host_2d(sc, 1, 0, 0, sc->sc_fb_width,
    471 		    sc->sc_fb_height);
    472 	else
    473 		sc->needs_update = true;
    474 
    475 	mutex_exit(&sc->sc_mutex);
    476 }
    477 
/*
 * Start an asynchronous command: copy it into the shared DMA buffer,
 * tag it with a fence id, and enqueue the command and response areas on
 * the control virtqueue.  Completion is handled by viogpu_vq_done().
 *
 * Called with sc_mutex held and no other request in flight (every
 * caller in this file checks/waits on is_requesting first).  Always
 * returns 0; enqueue failures panic because only one request is ever
 * outstanding, so the queue cannot legitimately be busy.
 */
static int
viogpu_cmd_req(struct viogpu_softc *sc, void *cmd, size_t cmd_size,
	       size_t ret_size)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vqs[VQCTRL];
	struct virtio_gpu_ctrl_hdr *hdr =
	    (struct virtio_gpu_ctrl_hdr *)sc->sc_cmd;
	int slot, error;

	/* Command at the start of the buffer, response area right after. */
	memcpy(sc->sc_cmd, cmd, cmd_size);
	memset((char *)sc->sc_cmd + cmd_size, 0, ret_size);

#if VIOGPU_DEBUG
	printf("%s: [%zu -> %zu]: ", __func__, cmd_size, ret_size);
	for (int i = 0; i < cmd_size; i++) {
		printf(" %02x", ((unsigned char *)sc->sc_cmd)[i]);
	}
	printf("\n");
#endif

	/* Ask the device to echo a fence id back with the response. */
	hdr->flags |= virtio_rw32(vsc, VIRTIO_GPU_FLAG_FENCE);
	hdr->fence_id = virtio_rw64(vsc, ++sc->sc_fence_id);

	error = virtio_enqueue_prep(vsc, vq, &slot);
	if (error != 0)
		panic("%s: control vq busy", device_xname(sc->sc_dev));

	/* Reserve the command segment(s) plus one slot for the response. */
	error = virtio_enqueue_reserve(vsc, vq, slot,
	    sc->sc_dma_map->dm_nsegs + 1);
	if (error != 0)
		panic("%s: control vq busy", device_xname(sc->sc_dev));

	/* Device-readable descriptor: the command itself. */
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dma_map, 0, cmd_size,
	    BUS_DMASYNC_PREWRITE);
	virtio_enqueue_p(vsc, vq, slot, sc->sc_dma_map, 0, cmd_size, true);

	/* Device-writable descriptor: the response area. */
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dma_map, cmd_size, ret_size,
	    BUS_DMASYNC_PREREAD);
	virtio_enqueue_p(vsc, vq, slot, sc->sc_dma_map, cmd_size, ret_size,
	    false);

	virtio_enqueue_commit(vsc, vq, slot, true);

	/* Record the layout so viogpu_vq_done() can locate the response. */
	sc->cur_cmd_size = cmd_size;
	sc->cur_ret_size = ret_size;
	sc->is_requesting = true;

	return 0;
}
    528 
/*
 * Virtqueue completion handler (runs from the virtio soft interrupt).
 * Reads back the response of the single in-flight command.  For the
 * asynchronous screen-update sequence it chains the follow-up command
 * (TRANSFER_TO_HOST_2D -> RESOURCE_FLUSH -> optional next transfer)
 * without waking waiters in between; waiters are only woken once the
 * chain ends.
 */
static int
viogpu_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct viogpu_softc *sc = device_private(virtio_child(vsc));
	struct virtio_gpu_ctrl_hdr *resp;
	int slot, len;
	uint32_t cmd_type, resp_type;
	uint64_t resp_fence, expect_fence;
	bool next_req_sent = false;

	mutex_enter(&sc->sc_mutex);

	/*
	 * Spin until the used-ring entry for the in-flight request shows
	 * up.  NOTE(review): this assumes the interrupt implies an entry
	 * is (or is about to become) available — confirm against the
	 * virtio(4) dequeue contract.
	 */
	while (virtio_dequeue(vsc, vq, &slot, &len) != 0)
		;

	virtio_dequeue_commit(vsc, vq, slot);

	/* Make the command/response areas coherent again for the CPU. */
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dma_map, 0, sc->cur_cmd_size,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dma_map, sc->cur_cmd_size,
	    sc->cur_ret_size, BUS_DMASYNC_POSTREAD);

	/* The response sits directly after the command in the buffer. */
	resp = (struct virtio_gpu_ctrl_hdr *)((char *)sc->sc_cmd +
	    sc->cur_cmd_size);

	cmd_type = virtio_rw32(vsc,
	    ((struct virtio_gpu_ctrl_hdr *)sc->sc_cmd)->type);
	resp_type = virtio_rw32(vsc, resp->type);
	resp_fence = virtio_rw64(vsc, resp->fence_id);
	expect_fence = sc->sc_fence_id;

	switch (cmd_type) {
	case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
		/* The second command for screen updating must be issued. */
		if (resp_type == VIRTIO_GPU_RESP_OK_NODATA) {
			viogpu_flush_resource(sc, 1, 0, 0, sc->sc_fb_width,
			    sc->sc_fb_height);
			next_req_sent = true;
		}
		break;
	case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
		/* A new update was requested while this one was running. */
		if (sc->needs_update == true) {
			viogpu_transfer_to_host_2d(sc, 1, 0, 0,
			    sc->sc_fb_width, sc->sc_fb_height);
			sc->needs_update = false;
			next_req_sent = true;
		}
		break;
	default:
		/* Other command types are called synchronously. */
		break;
	}

	/* End of a chain: mark the queue idle and wake sync waiters. */
	if (next_req_sent == false) {
		sc->is_requesting = false;
		cv_broadcast(&sc->req_wait);
	}

	mutex_exit(&sc->sc_mutex);

	/* Report async failures; sync callers check their own response. */
	if (resp_type != VIRTIO_GPU_RESP_OK_NODATA) {
		switch (cmd_type) {
		case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
			device_printf(sc->sc_dev,
			    "failed TRANSFER_TO_HOST: %d\n", resp_type);
			break;
		case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
			device_printf(sc->sc_dev,
			    "failed RESOURCE_FLUSH: %d\n", resp_type);
			break;
		default:
			break;
		}
	}

	if (resp_fence != expect_fence)
		printf("%s: return fence id not right (0x%" PRIx64 " != 0x%"
		    PRIx64 ")\n", __func__, resp_fence, expect_fence);

	return 0;
}
    611 
    612 static int
    613 viogpu_get_display_info(struct viogpu_softc *sc)
    614 {
    615 	struct virtio_softc *vsc = sc->sc_virtio;
    616 	struct virtio_gpu_ctrl_hdr hdr = { 0 };
    617 	struct virtio_gpu_resp_display_info info = { 0 };
    618 
    619 	hdr.type = virtio_rw32(vsc, VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
    620 
    621 	viogpu_cmd_sync(sc, &hdr, sizeof(hdr), &info, sizeof(info));
    622 
    623 	if (virtio_rw32(vsc, info.hdr.type) !=
    624 	    VIRTIO_GPU_RESP_OK_DISPLAY_INFO) {
    625 		device_printf(sc->sc_dev, "failed getting display info\n");
    626 		return 1;
    627 	}
    628 
    629 	if (!info.pmodes[0].enabled) {
    630 		device_printf(sc->sc_dev, "pmodes[0] is not enabled\n");
    631 		return 1;
    632 	}
    633 
    634 	sc->sc_fb_width = virtio_rw32(vsc, info.pmodes[0].r.width);
    635 	sc->sc_fb_height = virtio_rw32(vsc, info.pmodes[0].r.height);
    636 
    637 	return 0;
    638 }
    639 
    640 static int
    641 viogpu_create_2d(struct viogpu_softc *sc, uint32_t resource_id, uint32_t width,
    642 		 uint32_t height)
    643 {
    644 	struct virtio_softc *vsc = sc->sc_virtio;
    645 	struct virtio_gpu_resource_create_2d res = { 0 };
    646 	struct virtio_gpu_ctrl_hdr resp = { 0 };
    647 
    648 	res.hdr.type = virtio_rw32(vsc, VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
    649 	res.resource_id = virtio_rw32(vsc, resource_id);
    650 	res.format = virtio_rw32(vsc, VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM);
    651 	res.width = virtio_rw32(vsc, width);
    652 	res.height = virtio_rw32(vsc, height);
    653 
    654 	viogpu_cmd_sync(sc, &res, sizeof(res), &resp, sizeof(resp));
    655 
    656 	if (virtio_rw32(vsc, resp.type) != VIRTIO_GPU_RESP_OK_NODATA) {
    657 		device_printf(sc->sc_dev, "failed CREATE_2D: %d\n",
    658 		    virtio_rw32(vsc, resp.type));
    659 		return 1;
    660 	}
    661 
    662 	return 0;
    663 }
    664 
    665 static int
    666 viogpu_set_scanout(struct viogpu_softc *sc, uint32_t scanout_id,
    667 		   uint32_t resource_id, uint32_t width, uint32_t height)
    668 {
    669 	struct virtio_softc *vsc = sc->sc_virtio;
    670 	struct virtio_gpu_set_scanout ss = { 0 };
    671 	struct virtio_gpu_ctrl_hdr resp = { 0 };
    672 
    673 	ss.hdr.type = virtio_rw32(vsc, VIRTIO_GPU_CMD_SET_SCANOUT);
    674 	ss.scanout_id = virtio_rw32(vsc, scanout_id);
    675 	ss.resource_id = virtio_rw32(vsc, resource_id);
    676 	ss.r.width = virtio_rw32(vsc, width);
    677 	ss.r.height = virtio_rw32(vsc, height);
    678 
    679 	viogpu_cmd_sync(sc, &ss, sizeof(ss), &resp, sizeof(resp));
    680 
    681 	if (virtio_rw32(vsc, resp.type) != VIRTIO_GPU_RESP_OK_NODATA) {
    682 		device_printf(sc->sc_dev, "failed SET_SCANOUT: %d\n",
    683 		    virtio_rw32(vsc, resp.type));
    684 		return 1;
    685 	}
    686 
    687 	return 0;
    688 }
    689 
    690 static int
    691 viogpu_attach_backing(struct viogpu_softc *sc, uint32_t resource_id,
    692 		      bus_dmamap_t dmamap)
    693 {
    694 	struct virtio_softc *vsc = sc->sc_virtio;
    695 	struct virtio_gpu_resource_attach_backing_entries backing = { 0 };
    696 	struct virtio_gpu_ctrl_hdr resp = { 0 };
    697 
    698 	backing.hdr.type = virtio_rw32(vsc,
    699 	    VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
    700 	backing.resource_id = virtio_rw32(vsc, resource_id);
    701 	backing.nr_entries = virtio_rw32(vsc, __arraycount(backing.entries));
    702 	backing.entries[0].addr = virtio_rw64(vsc, dmamap->dm_segs[0].ds_addr);
    703 	backing.entries[0].length = virtio_rw32(vsc,
    704 	    dmamap->dm_segs[0].ds_len);
    705 
    706 	if (dmamap->dm_nsegs > 1)
    707 		printf("%s: TODO: send all %d segs\n", __func__,
    708 		    dmamap->dm_nsegs);
    709 
    710 #if VIOGPU_DEBUG
    711 	printf("%s: backing addr 0x%" PRIx64 " length %d\n", __func__,
    712 	    backing.entries[0].addr, backing.entries[0].length);
    713 #endif
    714 
    715 	viogpu_cmd_sync(sc, &backing, sizeof(backing), &resp, sizeof(resp));
    716 
    717 	if (virtio_rw32(vsc, resp.type) != VIRTIO_GPU_RESP_OK_NODATA) {
    718 		device_printf(sc->sc_dev, "failed ATTACH_BACKING: %d\n",
    719 		    virtio_rw32(vsc, resp.type));
    720 		return 1;
    721 	}
    722 
    723 	return 0;
    724 }
    725 
    726 static int
    727 viogpu_transfer_to_host_2d(struct viogpu_softc *sc, uint32_t resource_id,
    728 			   uint32_t x, uint32_t y, uint32_t width,
    729 			   uint32_t height)
    730 {
    731 	struct virtio_softc *vsc = sc->sc_virtio;
    732 	struct virtio_gpu_transfer_to_host_2d tth = { 0 };
    733 
    734 	tth.hdr.type = virtio_rw32(vsc, VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
    735 	tth.resource_id = virtio_rw32(vsc, resource_id);
    736 	tth.r.x = virtio_rw32(vsc, x);
    737 	tth.r.y = virtio_rw32(vsc, y);
    738 	tth.r.width = virtio_rw32(vsc, width);
    739 	tth.r.height = virtio_rw32(vsc, height);
    740 	tth.offset = virtio_rw64(vsc, (y * sc->sc_fb_width + x) *
    741 	    4 /* bpp / 8 */);
    742 
    743 	viogpu_cmd_req(sc, &tth, sizeof(tth),
    744 	    sizeof(struct virtio_gpu_ctrl_hdr));
    745 
    746 	return 0;
    747 }
    748 
    749 static int
    750 viogpu_flush_resource(struct viogpu_softc *sc, uint32_t resource_id,
    751 		      uint32_t x, uint32_t y, uint32_t width, uint32_t height)
    752 {
    753 	struct virtio_softc *vsc = sc->sc_virtio;
    754 	struct virtio_gpu_resource_flush flush = { 0 };
    755 
    756 	flush.hdr.type = virtio_rw32(vsc, VIRTIO_GPU_CMD_RESOURCE_FLUSH);
    757 	flush.resource_id = virtio_rw32(vsc, resource_id);
    758 	flush.r.x = virtio_rw32(vsc, x);
    759 	flush.r.y = virtio_rw32(vsc, y);
    760 	flush.r.width = virtio_rw32(vsc, width);
    761 	flush.r.height = virtio_rw32(vsc, height);
    762 
    763 	viogpu_cmd_req(sc, &flush, sizeof(flush),
    764 	    sizeof(struct virtio_gpu_ctrl_hdr));
    765 
    766 	return 0;
    767 }
    768 
    769 static int
    770 viogpu_wsioctl(void *v, void *vs, u_long cmd, void *data, int flag,
    771 	       struct lwp *l)
    772 {
    773 	struct rasops_info *ri = v;
    774 	struct wsdisplayio_fbinfo *fbi;
    775 	struct wsdisplay_fbinfo *wdf;
    776 
    777 	switch (cmd) {
    778 	case WSDISPLAYIO_GTYPE:
    779 		*(u_int *)data = WSDISPLAY_TYPE_VIOGPU;
    780 		return 0;
    781 	case WSDISPLAYIO_GET_FBINFO:
    782 		fbi = (struct wsdisplayio_fbinfo *)data;
    783 		return wsdisplayio_get_fbinfo(ri, fbi);
    784 	case WSDISPLAYIO_GINFO:
    785 		wdf = (struct wsdisplay_fbinfo *)data;
    786 		wdf->height = ri->ri_height;
    787 		wdf->width = ri->ri_width;
    788 		wdf->depth = ri->ri_depth;
    789 		wdf->cmsize = 0;
    790 		return 0;
    791 	case WSDISPLAYIO_LINEBYTES:
    792 		*(u_int *)data = ri->ri_stride;
    793 		return 0;
    794 	case WSDISPLAYIO_SMODE:
    795 		return 0;
    796 	case WSDISPLAYIO_GVIDEO:
    797 	case WSDISPLAYIO_SVIDEO:
    798 		return 0;
    799 	}
    800 
    801 	return EPASSTHROUGH;
    802 }
    803 
/*
 * vcons init_screen callback: configure rasops for the 32-bit BGRX
 * framebuffer and interpose our text operations so that every drawing
 * operation schedules a flush of the framebuffer to the host.
 */
static void
viogpu_init_screen(void *cookie, struct vcons_screen *scr, int existing,
		   long *defattr)
{
	struct viogpu_softc *sc = cookie;
	struct rasops_info *ri = &scr->scr_ri;

	ri->ri_bits = sc->sc_fb_dma_kva;
	ri->ri_flg = RI_CENTER | RI_CLEAR;
#if BYTE_ORDER == BIG_ENDIAN
	/* The pixel format is little-endian; swap on big-endian hosts. */
	ri->ri_flg |= RI_BSWAP;
#endif
	ri->ri_depth = 32;
	ri->ri_width = sc->sc_fb_width;
	ri->ri_height = sc->sc_fb_height;
	ri->ri_stride = ri->ri_width * ri->ri_depth / 8;
	ri->ri_bpos = 0;	/* B8G8R8X8 */
	ri->ri_bnum = 8;
	ri->ri_gpos = 8;
	ri->ri_gnum = 8;
	ri->ri_rpos = 16;
	ri->ri_rnum = 8;
	rasops_init(ri, 0, 0);
	ri->ri_caps = WSSCREEN_WSCOLORS | WSSCREEN_HILIT;
	/* Recompute rows/cols for the full framebuffer with the font. */
	rasops_reconfig(ri, sc->sc_fb_height / ri->ri_font->fontheight,
	    sc->sc_fb_width / ri->ri_font->fontwidth);

	/*
	 * Replace select text operations with wrappers that update the screen
	 * after the operation.
	 */
	sc->ri_cursor = ri->ri_ops.cursor;
	sc->ri_putchar = ri->ri_ops.putchar;
	sc->ri_copycols = ri->ri_ops.copycols;
	sc->ri_erasecols = ri->ri_ops.erasecols;
	sc->ri_copyrows = ri->ri_ops.copyrows;
	sc->ri_eraserows = ri->ri_ops.eraserows;
	sc->ri_replaceattr = ri->ri_ops.replaceattr;
	/* Only interpose ops that rasops actually provided. */
	ri->ri_ops.cursor = ri->ri_ops.cursor == NULL ? NULL : viogpu_cursor;
	ri->ri_ops.putchar = ri->ri_ops.putchar == NULL ? NULL :
	    viogpu_putchar;
	ri->ri_ops.copycols = ri->ri_ops.copycols == NULL ? NULL :
	    viogpu_copycols;
	ri->ri_ops.erasecols = ri->ri_ops.erasecols == NULL ? NULL :
	    viogpu_erasecols;
	ri->ri_ops.copyrows = ri->ri_ops.copyrows == NULL ? NULL :
	    viogpu_copyrows;
	ri->ri_ops.eraserows = ri->ri_ops.eraserows == NULL ? NULL :
	    viogpu_eraserows;
	ri->ri_ops.replaceattr = ri->ri_ops.replaceattr == NULL ? NULL :
	    viogpu_replaceattr;
}
    856 
    857 static void
    858 viogpu_cursor(void *c, int on, int row, int col)
    859 {
    860 	struct rasops_info *ri = c;
    861 	struct vcons_screen *vscr = ri->ri_hw;
    862 	struct viogpu_softc *sc = vscr->scr_vd->cookie;
    863 
    864 	sc->ri_cursor(c, on, row, col);
    865 
    866 	softint_schedule(sc->update_soft_ih);
    867 }
    868 
    869 static void
    870 viogpu_putchar(void *c, int row, int col, u_int uc, long attr)
    871 {
    872 	struct rasops_info *ri = c;
    873 	struct vcons_screen *vscr = ri->ri_hw;
    874 	struct viogpu_softc *sc = vscr->scr_vd->cookie;
    875 
    876 	sc->ri_putchar(c, row, col, uc, attr);
    877 
    878 	softint_schedule(sc->update_soft_ih);
    879 }
    880 
    881 static void
    882 viogpu_copycols(void *c, int row, int srccol, int dstcol, int ncols)
    883 {
    884 	struct rasops_info *ri = c;
    885 	struct vcons_screen *vscr = ri->ri_hw;
    886 	struct viogpu_softc *sc = vscr->scr_vd->cookie;
    887 
    888 	sc->ri_copycols(c, row, srccol, dstcol, ncols);
    889 
    890 	softint_schedule(sc->update_soft_ih);
    891 }
    892 
    893 static void
    894 viogpu_erasecols(void *c, int row, int startcol, int ncols, long attr)
    895 {
    896 	struct rasops_info *ri = c;
    897 	struct vcons_screen *vscr = ri->ri_hw;
    898 	struct viogpu_softc *sc = vscr->scr_vd->cookie;
    899 
    900 	sc->ri_erasecols(c, row, startcol, ncols, attr);
    901 
    902 	softint_schedule(sc->update_soft_ih);
    903 }
    904 
    905 static void
    906 viogpu_copyrows(void *c, int srcrow, int dstrow, int nrows)
    907 {
    908 	struct rasops_info *ri = c;
    909 	struct vcons_screen *vscr = ri->ri_hw;
    910 	struct viogpu_softc *sc = vscr->scr_vd->cookie;
    911 
    912 	sc->ri_copyrows(c, srcrow, dstrow, nrows);
    913 
    914 	softint_schedule(sc->update_soft_ih);
    915 }
    916 
    917 static void
    918 viogpu_eraserows(void *c, int row, int nrows, long attr)
    919 {
    920 	struct rasops_info *ri = c;
    921 	struct vcons_screen *vscr = ri->ri_hw;
    922 	struct viogpu_softc *sc = vscr->scr_vd->cookie;
    923 
    924 	sc->ri_eraserows(c, row, nrows, attr);
    925 
    926 	softint_schedule(sc->update_soft_ih);
    927 }
    928 
    929 static void
    930 viogpu_replaceattr(void *c, long oldattr, long newattr)
    931 {
    932 	struct rasops_info *ri = c;
    933 	struct vcons_screen *vscr = ri->ri_hw;
    934 	struct viogpu_softc *sc = vscr->scr_vd->cookie;
    935 
    936 	sc->ri_replaceattr(c, oldattr, newattr);
    937 
    938 	softint_schedule(sc->update_soft_ih);
    939 }
    940