/* sys/dev/pci/viogpu.c */
      1 /*	$NetBSD: viogpu.c,v 1.1 2025/07/26 14:18:13 martin Exp $ */
      2 /*	$OpenBSD: viogpu.c,v 1.3 2023/05/29 08:13:35 sf Exp $ */
      3 
      4 /*
      5  * Copyright (c) 2024-2025 The NetBSD Foundation, Inc.
      6  * All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  *
     17  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     19  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     20  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     21  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     22  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     27  * POSSIBILITY OF SUCH DAMAGE.
     28  */
     29 
     30 /*
     31  * Copyright (c) 2021-2023 joshua stein <jcs (at) openbsd.org>
     32  *
     33  * Permission to use, copy, modify, and distribute this software for any
     34  * purpose with or without fee is hereby granted, provided that the above
     35  * copyright notice and this permission notice appear in all copies.
     36  *
     37  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     38  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     39  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     40  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     41  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     42  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     43  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     44  */
     45 
     46 #include <sys/cdefs.h>
     47 
     48 #include <sys/param.h>
     49 #include <sys/systm.h>
     50 #include <sys/bus.h>
     51 #include <sys/condvar.h>
     52 #include <sys/device.h>
     53 #include <sys/intr.h>
     54 #include <sys/kernel.h>
     55 #include <sys/mutex.h>
     56 
     57 #include <dev/pci/virtioreg.h>
     58 #include <dev/pci/virtiovar.h>
     59 #include <dev/pci/viogpu.h>
     60 
     61 #include <dev/rasops/rasops.h>
     62 
     63 #include <dev/wscons/wsconsio.h>
     64 #include <dev/wscons/wsdisplayvar.h>
     65 #include <dev/wscons/wsdisplay_vconsvar.h>
     66 
     67 #include <prop/proplib.h>
     68 
     69 struct viogpu_softc;
     70 
     71 static int	viogpu_match(device_t, cfdata_t, void *);
     72 static void	viogpu_attach(device_t, device_t, void *);
     73 static void	viogpu_attach_postintr(device_t);
     74 static int	viogpu_cmd_sync(struct viogpu_softc *, void *, size_t, void *,
     75 				size_t);
     76 static int	viogpu_cmd_req(struct viogpu_softc *, void *, size_t, size_t);
     77 static void	viogpu_screen_update(void *);
     78 static int	viogpu_vq_done(struct virtqueue *vq);
     79 
     80 static int	viogpu_get_display_info(struct viogpu_softc *);
     81 static int	viogpu_create_2d(struct viogpu_softc *, uint32_t, uint32_t,
     82 				 uint32_t);
     83 static int	viogpu_set_scanout(struct viogpu_softc *, uint32_t, uint32_t,
     84 				   uint32_t, uint32_t);
     85 static int	viogpu_attach_backing(struct viogpu_softc *, uint32_t,
     86 				      bus_dmamap_t);
     87 static int	viogpu_transfer_to_host_2d(struct viogpu_softc *sc, uint32_t,
     88 					   uint32_t, uint32_t, uint32_t,
     89 					   uint32_t);
     90 static int	viogpu_flush_resource(struct viogpu_softc *, uint32_t,
     91 				      uint32_t, uint32_t, uint32_t, uint32_t);
     92 
     93 static int	viogpu_wsioctl(void *, void *, u_long, void *, int,
     94 			       struct lwp *);
     95 
     96 static void 	viogpu_init_screen(void *, struct vcons_screen *, int, long *);
     97 
     98 static void	viogpu_cursor(void *, int, int, int);
     99 static void	viogpu_putchar(void *, int, int, u_int, long);
    100 static void	viogpu_copycols(void *, int, int, int, int);
    101 static void	viogpu_erasecols(void *, int, int, int, long);
    102 static void	viogpu_copyrows(void *, int, int, int);
    103 static void	viogpu_eraserows(void *, int, int, long);
    104 static void	viogpu_replaceattr(void *, long, long);
    105 
/*
 * ATTACH_BACKING request with a single inline memory entry appended, so
 * the whole command can be sent as one contiguous buffer.  Only one
 * entry is supported for now (see viogpu_attach_backing()).
 */
struct virtio_gpu_resource_attach_backing_entries {
	struct virtio_gpu_ctrl_hdr hdr;
	__le32 resource_id;
	__le32 nr_entries;
	struct virtio_gpu_mem_entry entries[1];
} __packed;
    112 
/*
 * Size of the shared command DMA area: the largest request this driver
 * ever issues, plus room for a trailing response header.  The response
 * is always placed directly after the request (see viogpu_cmd_req()).
 */
#define VIOGPU_CMD_DMA_SIZE \
    MAX(sizeof(struct virtio_gpu_resp_display_info), \
    MAX(sizeof(struct virtio_gpu_resource_create_2d), \
    MAX(sizeof(struct virtio_gpu_set_scanout), \
    MAX(sizeof(struct virtio_gpu_resource_attach_backing_entries), \
    MAX(sizeof(struct virtio_gpu_transfer_to_host_2d), \
    sizeof(struct virtio_gpu_resource_flush)))))) + \
    sizeof(struct virtio_gpu_ctrl_hdr)
    121 
struct viogpu_softc {
	device_t		sc_dev;
	struct virtio_softc	*sc_virtio;
#define	VQCTRL	0
#define	VQCURS	1
	/* Control and cursor virtqueues, indexed by VQCTRL/VQCURS. */
	struct virtqueue	sc_vqs[2];

	/* Shared DMA area holding one request followed by its response. */
	bus_dma_segment_t	sc_dma_seg;
	bus_dmamap_t		sc_dma_map;
	void			*sc_cmd;
	int			sc_fence_id;	/* fence id of the last command sent */

	/* Framebuffer geometry and its backing DMA memory. */
	int			sc_fb_height;
	int			sc_fb_width;
	bus_dma_segment_t	sc_fb_dma_seg;
	bus_dmamap_t		sc_fb_dma_map;
	size_t			sc_fb_dma_size;
	void			*sc_fb_dma_kva;

	/* wscons/vcons glue; this driver exposes a single static screen. */
	struct wsscreen_descr		sc_wsd;
	const struct wsscreen_descr	*sc_scrlist[1];
	struct wsscreen_list		sc_wsl;
	struct vcons_data		sc_vd;
	struct vcons_screen		sc_vcs;
	bool				is_console;

	/* Saved rasops text ops; the installed ops wrap these and then
	 * schedule a screen update (see viogpu_init_screen()). */
	void	(*ri_cursor)(void *, int, int, int);
	void	(*ri_putchar)(void *, int, int, u_int, long);
	void	(*ri_copycols)(void *, int, int, int, int);
	void	(*ri_erasecols)(void *, int, int, int, long);
	void	(*ri_copyrows)(void *, int, int, int);
	void	(*ri_eraserows)(void *, int, int, long);
	void	(*ri_replaceattr)(void *, long, long);

	/*
	 * sc_mutex protects is_requesting, needs_update, and req_wait. It is
	 * also held while submitting and reading the return values of
	 * asynchronous commands and for the full duration of synchronous
	 * commands.
	 */
	kmutex_t		sc_mutex;
	bool			is_requesting;	/* a command is in flight */
	bool			needs_update;	/* redraw once in-flight command completes */
	kcondvar_t		req_wait;
	void			*update_soft_ih; /* softint running viogpu_screen_update() */
	size_t			cur_cmd_size;	/* sizes of the in-flight request/response */
	size_t			cur_ret_size;
};
    170 
CFATTACH_DECL_NEW(viogpu, sizeof(struct viogpu_softc),
		  viogpu_match, viogpu_attach, NULL, NULL);

/* VIRGL/EDID feature bits are only negotiated in debug builds. */
#if VIOGPU_DEBUG
#define VIOGPU_FEATURES		(VIRTIO_GPU_F_VIRGL | VIRTIO_GPU_F_EDID)
#else
#define VIOGPU_FEATURES		0
#endif
    179 
/* wsdisplay access ops; only ioctl is implemented. */
static struct wsdisplay_accessops viogpu_accessops = {
	.ioctl        = viogpu_wsioctl,
	.mmap         = NULL, /* This would require signalling on write to
	                       * update the screen. */
	.alloc_screen = NULL,
	.free_screen  = NULL,
	.show_screen  = NULL,
	.load_font    = NULL,
	.pollc        = NULL,
	.scroll       = NULL,
};
    191 
    192 static int
    193 viogpu_match(device_t parent, cfdata_t match, void *aux)
    194 {
    195 	struct virtio_attach_args *va = aux;
    196 
    197 	if (va->sc_childdevid == VIRTIO_DEVICE_ID_GPU)
    198 		return 1;
    199 
    200 	return 0;
    201 }
    202 
/*
 * Attach the virtio-gpu device: initialize synchronization state and
 * allocate the control and cursor virtqueues.  DMA and wscons setup is
 * deferred to viogpu_attach_postintr() because it issues synchronous
 * commands that need working interrupts.
 */
static void
viogpu_attach(device_t parent, device_t self, void *aux)
{
	struct viogpu_softc *sc = device_private(self);
	struct virtio_softc *vsc = device_private(parent);
	int error;

	if (virtio_child(vsc) != NULL) {
		aprint_error("child already attached for %s\n",
		    device_xname(parent));
		return;
	}

	sc->sc_dev = self;
	sc->sc_virtio = vsc;

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->req_wait, "vgpu_req");
	/* Soft interrupt used to coalesce screen updates from text ops. */
	sc->update_soft_ih = softint_establish(SOFTINT_NET,
	    viogpu_screen_update, sc);
	sc->needs_update = false;
	sc->is_requesting = false;
	sc->sc_fence_id = 0;

	virtio_child_attach_start(vsc, self, IPL_VM,
	    VIOGPU_FEATURES, VIRTIO_COMMON_FLAG_BITS);

	if (!virtio_version_1(vsc)) {
		aprint_error_dev(sc->sc_dev, "requires virtio version 1\n");
		goto err;
	}

	/* Allocate command and cursor virtqueues. */
	virtio_init_vq_vqdone(vsc, &sc->sc_vqs[VQCTRL], 0, viogpu_vq_done);
	error = virtio_alloc_vq(vsc, &sc->sc_vqs[VQCTRL], NBPG, 1, "control");
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "alloc_vq failed: %d\n", error);
		goto err;
	}

	virtio_init_vq_vqdone(vsc, &sc->sc_vqs[VQCURS], 1, viogpu_vq_done);
	error = virtio_alloc_vq(vsc, &sc->sc_vqs[VQCURS], NBPG, 1, "cursor");
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "alloc_vq failed: %d\n", error);
		goto free_vq0;
	}

	if (virtio_child_attach_finish(vsc, sc->sc_vqs,
	    __arraycount(sc->sc_vqs), NULL,
	    VIRTIO_F_INTR_MPSAFE | VIRTIO_F_INTR_SOFTINT) != 0)
		goto free_vqs;

	/* Interrupts are required for synchronous commands in attachment. */
	config_interrupts(self, viogpu_attach_postintr);

	return;

	/* Error unwind: release in reverse order of acquisition. */
free_vqs:
	virtio_free_vq(vsc, &sc->sc_vqs[VQCURS]);
free_vq0:
	virtio_free_vq(vsc, &sc->sc_vqs[VQCTRL]);
err:
	virtio_child_attach_failed(vsc);
	cv_destroy(&sc->req_wait);
	mutex_destroy(&sc->sc_mutex);
	return;
}
    270 
    271 static void
    272 viogpu_attach_postintr(device_t self)
    273 {
    274 	struct viogpu_softc *sc = device_private(self);
    275 	struct virtio_softc *vsc = sc->sc_virtio;
    276 	struct wsemuldisplaydev_attach_args waa;
    277 	struct rasops_info *ri;
    278 	prop_dictionary_t dict;
    279 	long defattr;
    280 	int nsegs;
    281 	int error;
    282 
    283 	/* Set up DMA space for sending commands. */
    284 	error = bus_dmamap_create(virtio_dmat(vsc), VIOGPU_CMD_DMA_SIZE, 1,
    285 	    VIOGPU_CMD_DMA_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
    286 	    &sc->sc_dma_map);
    287 	if (error != 0) {
    288 		aprint_error_dev(sc->sc_dev, "bus_dmamap_create failed: %d\n",
    289 		    error);
    290 		goto err;
    291 	}
    292 	error = bus_dmamem_alloc(virtio_dmat(vsc), VIOGPU_CMD_DMA_SIZE, 16, 0,
    293 	    &sc->sc_dma_seg, 1, &nsegs, BUS_DMA_NOWAIT);
    294 	if (error != 0) {
    295 		aprint_error_dev(sc->sc_dev, "bus_dmamem_alloc failed: %d\n",
    296 		    error);
    297 		goto destroy;
    298 	}
    299 	error = bus_dmamem_map(virtio_dmat(vsc), &sc->sc_dma_seg, nsegs,
    300 	    VIOGPU_CMD_DMA_SIZE, &sc->sc_cmd, BUS_DMA_NOWAIT);
    301 	if (error != 0) {
    302 		aprint_error_dev(sc->sc_dev, "bus_dmamem_map failed: %d\n",
    303 		    error);
    304 		goto free;
    305 	}
    306 	memset(sc->sc_cmd, 0, VIOGPU_CMD_DMA_SIZE);
    307 	error = bus_dmamap_load(virtio_dmat(vsc), sc->sc_dma_map, sc->sc_cmd,
    308 	    VIOGPU_CMD_DMA_SIZE, NULL, BUS_DMA_NOWAIT);
    309 	if (error != 0) {
    310 		aprint_error_dev(sc->sc_dev, "bus_dmamap_load failed: %d\n",
    311 		    error);
    312 		goto unmap;
    313 	}
    314 
    315 	if (viogpu_get_display_info(sc) != 0)
    316 		goto unmap;
    317 
    318 	/* Set up DMA space for actual framebuffer. */
    319 	sc->sc_fb_dma_size = sc->sc_fb_width * sc->sc_fb_height * 4;
    320 	error = bus_dmamap_create(virtio_dmat(vsc), sc->sc_fb_dma_size, 1,
    321 	    sc->sc_fb_dma_size, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
    322 	    &sc->sc_fb_dma_map);
    323 	if (error != 0) {
    324 		aprint_error_dev(sc->sc_dev, "bus_dmamap_create failed: %d\n",
    325 		    error);
    326 		goto unmap;
    327 	}
    328 	error = bus_dmamem_alloc(virtio_dmat(vsc), sc->sc_fb_dma_size, 1024, 0,
    329 	    &sc->sc_fb_dma_seg, 1, &nsegs, BUS_DMA_NOWAIT);
    330 	if (error != 0) {
    331 		aprint_error_dev(sc->sc_dev, "bus_dmamem_alloc failed: %d\n",
    332 		    error);
    333 		goto fb_destroy;
    334 	}
    335 	error = bus_dmamem_map(virtio_dmat(vsc), &sc->sc_fb_dma_seg, nsegs,
    336 	    sc->sc_fb_dma_size, &sc->sc_fb_dma_kva, BUS_DMA_NOWAIT);
    337 	if (error != 0) {
    338 		aprint_error_dev(sc->sc_dev, "bus_dmamem_map failed: %d\n",
    339 		    error);
    340 		goto fb_free;
    341 	}
    342 	memset(sc->sc_fb_dma_kva, 0, sc->sc_fb_dma_size);
    343 	error = bus_dmamap_load(virtio_dmat(vsc), sc->sc_fb_dma_map,
    344 	    sc->sc_fb_dma_kva, sc->sc_fb_dma_size, NULL, BUS_DMA_NOWAIT);
    345 	if (error != 0) {
    346 		aprint_error_dev(sc->sc_dev, "bus_dmamap_load failed: %d\n",
    347 		    error);
    348 		goto fb_unmap;
    349 	}
    350 
    351 	if (viogpu_create_2d(sc, 1, sc->sc_fb_width, sc->sc_fb_height) != 0)
    352 		goto fb_unmap;
    353 
    354 	if (viogpu_attach_backing(sc, 1, sc->sc_fb_dma_map) != 0)
    355 		goto fb_unmap;
    356 
    357 	if (viogpu_set_scanout(sc, 0, 1, sc->sc_fb_width,
    358 	    sc->sc_fb_height) != 0)
    359 		goto fb_unmap;
    360 
    361 #ifdef WSDISPLAY_MULTICONS
    362 	sc->is_console = true;
    363 #else
    364 	sc->is_console = false;
    365 #endif
    366 	dict = device_properties(self);
    367 	prop_dictionary_get_bool(dict, "is_console", &sc->is_console);
    368 
    369 	sc->sc_wsd = (struct wsscreen_descr){
    370 		"std",
    371 		0, 0,
    372 		NULL,
    373 		8, 16,
    374 		WSSCREEN_WSCOLORS | WSSCREEN_HILIT,
    375 		NULL
    376 	};
    377 
    378 	sc->sc_scrlist[0] = &sc->sc_wsd;
    379 	sc->sc_wsl.nscreens = __arraycount(sc->sc_scrlist);
    380 	sc->sc_wsl.screens = sc->sc_scrlist;
    381 
    382 	vcons_init(&sc->sc_vd, sc, &sc->sc_wsd, &viogpu_accessops);
    383 	sc->sc_vd.init_screen = viogpu_init_screen;
    384 
    385 	vcons_init_screen(&sc->sc_vd, &sc->sc_vcs, 1, &defattr);
    386 	sc->sc_vcs.scr_flags |= VCONS_SCREEN_IS_STATIC;
    387 	ri = &sc->sc_vcs.scr_ri;
    388 
    389 	sc->sc_wsd.textops = &ri->ri_ops;
    390 	sc->sc_wsd.capabilities = ri->ri_caps;
    391 	sc->sc_wsd.nrows = ri->ri_rows;
    392 	sc->sc_wsd.ncols = ri->ri_cols;
    393 
    394 	if (sc->is_console) {
    395 		wsdisplay_cnattach(&sc->sc_wsd, ri, 0, 0, defattr);
    396 		vcons_replay_msgbuf(&sc->sc_vcs);
    397 	}
    398 
    399 	device_printf(sc->sc_dev, "%dx%d, %dbpp\n", ri->ri_width,
    400 	    ri->ri_height, ri->ri_depth);
    401 
    402 	waa.scrdata = &sc->sc_wsl;
    403 	waa.accessops = &viogpu_accessops;
    404 	waa.accesscookie = &sc->sc_vd;
    405 	waa.console = sc->is_console;
    406 
    407 	config_found(self, &waa, wsemuldisplaydevprint, CFARGS_NONE);
    408 
    409 	return;
    410 
    411 fb_unmap:
    412 	bus_dmamem_unmap(virtio_dmat(vsc), &sc->sc_fb_dma_kva,
    413 	    sc->sc_fb_dma_size);
    414 fb_free:
    415 	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_fb_dma_seg, 1);
    416 fb_destroy:
    417 	bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_fb_dma_map);
    418 unmap:
    419 	bus_dmamem_unmap(virtio_dmat(vsc), &sc->sc_cmd, VIOGPU_CMD_DMA_SIZE);
    420 free:
    421 	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_dma_seg, 1);
    422 destroy:
    423 	bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_dma_map);
    424 err:
    425 	aprint_error_dev(sc->sc_dev, "DMA setup failed\n");
    426 	virtio_free_vq(vsc, &sc->sc_vqs[VQCURS]);
    427 	virtio_free_vq(vsc, &sc->sc_vqs[VQCTRL]);
    428 	virtio_child_attach_failed(vsc);
    429 	cv_destroy(&sc->req_wait);
    430 	mutex_destroy(&sc->sc_mutex);
    431 	return;
    432 }
    433 
    434 /*
    435  * This carries out a command synchronously, unlike the commands used to
    436  * update the screen.
    437  */
    438 static int
    439 viogpu_cmd_sync(struct viogpu_softc *sc, void *cmd, size_t cmd_size,
    440 		void *ret, size_t ret_size)
    441 {
    442 	int error;
    443 
    444 	mutex_enter(&sc->sc_mutex);
    445 
    446 	while (sc->is_requesting == true)
    447 		cv_wait(&sc->req_wait, &sc->sc_mutex);
    448 
    449 	error = viogpu_cmd_req(sc, cmd, cmd_size, ret_size);
    450 	if (error != 0)
    451 		goto out;
    452 
    453 	while (sc->is_requesting == true)
    454 		cv_wait(&sc->req_wait, &sc->sc_mutex);
    455 
    456 	if (ret != NULL)
    457 		memcpy(ret, (char *)sc->sc_cmd + cmd_size, ret_size);
    458 
    459 out:
    460 	mutex_exit(&sc->sc_mutex);
    461 
    462 	return error;
    463 }
    464 
    465 static void
    466 viogpu_screen_update(void *arg)
    467 {
    468 	struct viogpu_softc *sc = arg;
    469 
    470 	mutex_enter(&sc->sc_mutex);
    471 
    472 	if (sc->is_requesting == false)
    473 		viogpu_transfer_to_host_2d(sc, 1, 0, 0, sc->sc_fb_width,
    474 		    sc->sc_fb_height);
    475 	else
    476 		sc->needs_update = true;
    477 
    478 	mutex_exit(&sc->sc_mutex);
    479 }
    480 
/*
 * Start a command asynchronously: copy it into the shared DMA area,
 * enqueue it on the control queue together with a read buffer for the
 * response, and kick the device.  Completion is reported through
 * viogpu_vq_done().  The caller must hold sc_mutex (see the comment in
 * struct viogpu_softc).
 */
static int
viogpu_cmd_req(struct viogpu_softc *sc, void *cmd, size_t cmd_size,
	       size_t ret_size)
{
	struct virtio_softc *vsc = sc->sc_virtio;
	struct virtqueue *vq = &sc->sc_vqs[VQCTRL];
	struct virtio_gpu_ctrl_hdr *hdr =
	    (struct virtio_gpu_ctrl_hdr *)sc->sc_cmd;
	int slot, error;

	/* Request at the start of the DMA area, response right after it. */
	memcpy(sc->sc_cmd, cmd, cmd_size);
	memset((char *)sc->sc_cmd + cmd_size, 0, ret_size);

#if VIOGPU_DEBUG
	printf("%s: [%zu -> %zu]: ", __func__, cmd_size, ret_size);
	for (int i = 0; i < cmd_size; i++) {
		printf(" %02x", ((unsigned char *)sc->sc_cmd)[i]);
	}
	printf("\n");
#endif

	/* Fence the command so the completion carries a matching fence id. */
	hdr->flags |= virtio_rw32(vsc, VIRTIO_GPU_FLAG_FENCE);
	hdr->fence_id = virtio_rw64(vsc, ++sc->sc_fence_id);

	error = virtio_enqueue_prep(vsc, vq, &slot);
	if (error != 0)
		panic("%s: control vq busy", device_xname(sc->sc_dev));

	error = virtio_enqueue_reserve(vsc, vq, slot,
	    sc->sc_dma_map->dm_nsegs + 1);
	if (error != 0)
		panic("%s: control vq busy", device_xname(sc->sc_dev));

	/* Device-readable request descriptor ... */
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dma_map, 0, cmd_size,
	    BUS_DMASYNC_PREWRITE);
	virtio_enqueue_p(vsc, vq, slot, sc->sc_dma_map, 0, cmd_size, true);

	/* ... followed by the device-writable response descriptor. */
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dma_map, cmd_size, ret_size,
	    BUS_DMASYNC_PREREAD);
	virtio_enqueue_p(vsc, vq, slot, sc->sc_dma_map, cmd_size, ret_size,
	    false);

	virtio_enqueue_commit(vsc, vq, slot, true);

	/* Record sizes so viogpu_vq_done() can sync and parse the reply. */
	sc->cur_cmd_size = cmd_size;
	sc->cur_ret_size = ret_size;
	sc->is_requesting = true;

	return 0;
}
    531 
/*
 * Virtqueue completion handler.  Reads back the response of the command
 * that just finished.  For the asynchronous screen-update sequence it
 * chains the follow-up command while still holding sc_mutex:
 * TRANSFER_TO_HOST_2D is followed by RESOURCE_FLUSH, and a pending
 * update (needs_update) restarts the cycle.  Otherwise it marks the
 * request slot free and wakes waiters in viogpu_cmd_sync().
 */
static int
viogpu_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct viogpu_softc *sc = device_private(virtio_child(vsc));
	struct virtio_gpu_ctrl_hdr *resp;
	int slot, len;
	uint32_t cmd_type, resp_type;
	uint64_t resp_fence, expect_fence;
	bool next_req_sent = false;

	mutex_enter(&sc->sc_mutex);

	/* Only one request is outstanding at a time; wait for its slot. */
	while (virtio_dequeue(vsc, vq, &slot, &len) != 0)
		;

	virtio_dequeue_commit(vsc, vq, slot);

	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dma_map, 0, sc->cur_cmd_size,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(virtio_dmat(vsc), sc->sc_dma_map, sc->cur_cmd_size,
	    sc->cur_ret_size, BUS_DMASYNC_POSTREAD);

	/* The response was placed directly after the request. */
	resp = (struct virtio_gpu_ctrl_hdr *)((char *)sc->sc_cmd +
	    sc->cur_cmd_size);

	cmd_type = virtio_rw32(vsc,
	    ((struct virtio_gpu_ctrl_hdr *)sc->sc_cmd)->type);
	resp_type = virtio_rw32(vsc, resp->type);
	resp_fence = virtio_rw64(vsc, resp->fence_id);
	expect_fence = sc->sc_fence_id;

	switch (cmd_type) {
	case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
		/* The second command for screen updating must be issued. */
		if (resp_type == VIRTIO_GPU_RESP_OK_NODATA) {
			viogpu_flush_resource(sc, 1, 0, 0, sc->sc_fb_width,
			    sc->sc_fb_height);
			next_req_sent = true;
		}
		break;
	case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
		/* The screen changed while updating; start another pass. */
		if (sc->needs_update == true) {
			viogpu_transfer_to_host_2d(sc, 1, 0, 0,
			    sc->sc_fb_width, sc->sc_fb_height);
			sc->needs_update = false;
			next_req_sent = true;
		}
		break;
	default:
		/* Other command types are called synchronously. */
		break;
	}

	/* No follow-up queued: free the slot and wake viogpu_cmd_sync(). */
	if (next_req_sent == false) {
		sc->is_requesting = false;
		cv_broadcast(&sc->req_wait);
	}

	mutex_exit(&sc->sc_mutex);

	/* Error reporting is done after dropping the mutex. */
	if (resp_type != VIRTIO_GPU_RESP_OK_NODATA) {
		switch (cmd_type) {
		case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
			device_printf(sc->sc_dev,
			    "failed TRANSFER_TO_HOST: %d\n", resp_type);
			break;
		case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
			device_printf(sc->sc_dev,
			    "failed RESOURCE_FLUSH: %d\n", resp_type);
			break;
		default:
			break;
		}
	}

	if (resp_fence != expect_fence)
		printf("%s: return fence id not right (0x%" PRIx64 " != 0x%"
		    PRIx64 ")\n", __func__, resp_fence, expect_fence);

	return 0;
}
    614 
    615 static int
    616 viogpu_get_display_info(struct viogpu_softc *sc)
    617 {
    618 	struct virtio_softc *vsc = sc->sc_virtio;
    619 	struct virtio_gpu_ctrl_hdr hdr = { 0 };
    620 	struct virtio_gpu_resp_display_info info = { 0 };
    621 
    622 	hdr.type = virtio_rw32(vsc, VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
    623 
    624 	viogpu_cmd_sync(sc, &hdr, sizeof(hdr), &info, sizeof(info));
    625 
    626 	if (virtio_rw32(vsc, info.hdr.type) !=
    627 	    VIRTIO_GPU_RESP_OK_DISPLAY_INFO) {
    628 		device_printf(sc->sc_dev, "failed getting display info\n");
    629 		return 1;
    630 	}
    631 
    632 	if (!info.pmodes[0].enabled) {
    633 		device_printf(sc->sc_dev, "pmodes[0] is not enabled\n");
    634 		return 1;
    635 	}
    636 
    637 	sc->sc_fb_width = virtio_rw32(vsc, info.pmodes[0].r.width);
    638 	sc->sc_fb_height = virtio_rw32(vsc, info.pmodes[0].r.height);
    639 
    640 	return 0;
    641 }
    642 
    643 static int
    644 viogpu_create_2d(struct viogpu_softc *sc, uint32_t resource_id, uint32_t width,
    645 		 uint32_t height)
    646 {
    647 	struct virtio_softc *vsc = sc->sc_virtio;
    648 	struct virtio_gpu_resource_create_2d res = { 0 };
    649 	struct virtio_gpu_ctrl_hdr resp = { 0 };
    650 
    651 	res.hdr.type = virtio_rw32(vsc, VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
    652 	res.resource_id = virtio_rw32(vsc, resource_id);
    653 	res.format = virtio_rw32(vsc, VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM);
    654 	res.width = virtio_rw32(vsc, width);
    655 	res.height = virtio_rw32(vsc, height);
    656 
    657 	viogpu_cmd_sync(sc, &res, sizeof(res), &resp, sizeof(resp));
    658 
    659 	if (virtio_rw32(vsc, resp.type) != VIRTIO_GPU_RESP_OK_NODATA) {
    660 		device_printf(sc->sc_dev, "failed CREATE_2D: %d\n",
    661 		    virtio_rw32(vsc, resp.type));
    662 		return 1;
    663 	}
    664 
    665 	return 0;
    666 }
    667 
    668 static int
    669 viogpu_set_scanout(struct viogpu_softc *sc, uint32_t scanout_id,
    670 		   uint32_t resource_id, uint32_t width, uint32_t height)
    671 {
    672 	struct virtio_softc *vsc = sc->sc_virtio;
    673 	struct virtio_gpu_set_scanout ss = { 0 };
    674 	struct virtio_gpu_ctrl_hdr resp = { 0 };
    675 
    676 	ss.hdr.type = virtio_rw32(vsc, VIRTIO_GPU_CMD_SET_SCANOUT);
    677 	ss.scanout_id = virtio_rw32(vsc, scanout_id);
    678 	ss.resource_id = virtio_rw32(vsc, resource_id);
    679 	ss.r.width = virtio_rw32(vsc, width);
    680 	ss.r.height = virtio_rw32(vsc, height);
    681 
    682 	viogpu_cmd_sync(sc, &ss, sizeof(ss), &resp, sizeof(resp));
    683 
    684 	if (virtio_rw32(vsc, resp.type) != VIRTIO_GPU_RESP_OK_NODATA) {
    685 		device_printf(sc->sc_dev, "failed SET_SCANOUT: %d\n",
    686 		    virtio_rw32(vsc, resp.type));
    687 		return 1;
    688 	}
    689 
    690 	return 0;
    691 }
    692 
    693 static int
    694 viogpu_attach_backing(struct viogpu_softc *sc, uint32_t resource_id,
    695 		      bus_dmamap_t dmamap)
    696 {
    697 	struct virtio_softc *vsc = sc->sc_virtio;
    698 	struct virtio_gpu_resource_attach_backing_entries backing = { 0 };
    699 	struct virtio_gpu_ctrl_hdr resp = { 0 };
    700 
    701 	backing.hdr.type = virtio_rw32(vsc,
    702 	    VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
    703 	backing.resource_id = virtio_rw32(vsc, resource_id);
    704 	backing.nr_entries = virtio_rw32(vsc, __arraycount(backing.entries));
    705 	backing.entries[0].addr = virtio_rw64(vsc, dmamap->dm_segs[0].ds_addr);
    706 	backing.entries[0].length = virtio_rw32(vsc,
    707 	    dmamap->dm_segs[0].ds_len);
    708 
    709 	if (dmamap->dm_nsegs > 1)
    710 		printf("%s: TODO: send all %d segs\n", __func__,
    711 		    dmamap->dm_nsegs);
    712 
    713 #if VIOGPU_DEBUG
    714 	printf("%s: backing addr 0x%" PRIx64 " length %d\n", __func__,
    715 	    backing.entries[0].addr, backing.entries[0].length);
    716 #endif
    717 
    718 	viogpu_cmd_sync(sc, &backing, sizeof(backing), &resp, sizeof(resp));
    719 
    720 	if (virtio_rw32(vsc, resp.type) != VIRTIO_GPU_RESP_OK_NODATA) {
    721 		device_printf(sc->sc_dev, "failed ATTACH_BACKING: %d\n",
    722 		    virtio_rw32(vsc, resp.type));
    723 		return 1;
    724 	}
    725 
    726 	return 0;
    727 }
    728 
    729 static int
    730 viogpu_transfer_to_host_2d(struct viogpu_softc *sc, uint32_t resource_id,
    731 			   uint32_t x, uint32_t y, uint32_t width,
    732 			   uint32_t height)
    733 {
    734 	struct virtio_softc *vsc = sc->sc_virtio;
    735 	struct virtio_gpu_transfer_to_host_2d tth = { 0 };
    736 
    737 	tth.hdr.type = virtio_rw32(vsc, VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
    738 	tth.resource_id = virtio_rw32(vsc, resource_id);
    739 	tth.r.x = virtio_rw32(vsc, x);
    740 	tth.r.y = virtio_rw32(vsc, y);
    741 	tth.r.width = virtio_rw32(vsc, width);
    742 	tth.r.height = virtio_rw32(vsc, height);
    743 	tth.offset = virtio_rw64(vsc, (y * sc->sc_fb_width + x) *
    744 	    4 /* bpp / 8 */);
    745 
    746 	viogpu_cmd_req(sc, &tth, sizeof(tth),
    747 	    sizeof(struct virtio_gpu_ctrl_hdr));
    748 
    749 	return 0;
    750 }
    751 
    752 static int
    753 viogpu_flush_resource(struct viogpu_softc *sc, uint32_t resource_id,
    754 		      uint32_t x, uint32_t y, uint32_t width, uint32_t height)
    755 {
    756 	struct virtio_softc *vsc = sc->sc_virtio;
    757 	struct virtio_gpu_resource_flush flush = { 0 };
    758 
    759 	flush.hdr.type = virtio_rw32(vsc, VIRTIO_GPU_CMD_RESOURCE_FLUSH);
    760 	flush.resource_id = virtio_rw32(vsc, resource_id);
    761 	flush.r.x = virtio_rw32(vsc, x);
    762 	flush.r.y = virtio_rw32(vsc, y);
    763 	flush.r.width = virtio_rw32(vsc, width);
    764 	flush.r.height = virtio_rw32(vsc, height);
    765 
    766 	viogpu_cmd_req(sc, &flush, sizeof(flush),
    767 	    sizeof(struct virtio_gpu_ctrl_hdr));
    768 
    769 	return 0;
    770 }
    771 
    772 static int
    773 viogpu_wsioctl(void *v, void *vs, u_long cmd, void *data, int flag,
    774 	       struct lwp *l)
    775 {
    776 	struct rasops_info *ri = v;
    777 	struct wsdisplayio_fbinfo *fbi;
    778 	struct wsdisplay_fbinfo *wdf;
    779 
    780 	switch (cmd) {
    781 	case WSDISPLAYIO_GTYPE:
    782 		*(u_int *)data = WSDISPLAY_TYPE_VIOGPU;
    783 		return 0;
    784 	case WSDISPLAYIO_GET_FBINFO:
    785 		fbi = (struct wsdisplayio_fbinfo *)data;
    786 		return wsdisplayio_get_fbinfo(ri, fbi);
    787 	case WSDISPLAYIO_GINFO:
    788 		wdf = (struct wsdisplay_fbinfo *)data;
    789 		wdf->height = ri->ri_height;
    790 		wdf->width = ri->ri_width;
    791 		wdf->depth = ri->ri_depth;
    792 		wdf->cmsize = 0;
    793 		return 0;
    794 	case WSDISPLAYIO_LINEBYTES:
    795 		*(u_int *)data = ri->ri_stride;
    796 		return 0;
    797 	case WSDISPLAYIO_SMODE:
    798 		return 0;
    799 	case WSDISPLAYIO_GVIDEO:
    800 	case WSDISPLAYIO_SVIDEO:
    801 		return 0;
    802 	}
    803 
    804 	return EPASSTHROUGH;
    805 }
    806 
/*
 * vcons init_screen hook: point rasops at the framebuffer DMA memory,
 * describe the 32-bit B8G8R8X8 pixel layout, and wrap the rasops text
 * operations so that every drawing operation schedules a host update
 * via the soft interrupt.
 */
static void
viogpu_init_screen(void *cookie, struct vcons_screen *scr, int existing,
		   long *defattr)
{
	struct viogpu_softc *sc = cookie;
	struct rasops_info *ri = &scr->scr_ri;

	ri->ri_bits = sc->sc_fb_dma_kva;
	ri->ri_flg = RI_CENTER | RI_CLEAR;
#if BYTE_ORDER == BIG_ENDIAN
	ri->ri_flg |= RI_BSWAP;		/* swap on big-endian hosts */
#endif
	ri->ri_depth = 32;
	ri->ri_width = sc->sc_fb_width;
	ri->ri_height = sc->sc_fb_height;
	ri->ri_stride = ri->ri_width * ri->ri_depth / 8;
	ri->ri_bpos = 0;	/* B8G8R8X8 */
	ri->ri_bnum = 8;
	ri->ri_gpos = 8;
	ri->ri_gnum = 8;
	ri->ri_rpos = 16;
	ri->ri_rnum = 8;
	rasops_init(ri, 0, 0);
	ri->ri_caps = WSSCREEN_WSCOLORS | WSSCREEN_HILIT;
	/* Size the text grid to the framebuffer using the chosen font. */
	rasops_reconfig(ri, sc->sc_fb_height / ri->ri_font->fontheight,
	    sc->sc_fb_width / ri->ri_font->fontwidth);

	/*
	 * Replace select text operations with wrappers that update the screen
	 * after the operation.
	 */
	sc->ri_cursor = ri->ri_ops.cursor;
	sc->ri_putchar = ri->ri_ops.putchar;
	sc->ri_copycols = ri->ri_ops.copycols;
	sc->ri_erasecols = ri->ri_ops.erasecols;
	sc->ri_copyrows = ri->ri_ops.copyrows;
	sc->ri_eraserows = ri->ri_ops.eraserows;
	sc->ri_replaceattr = ri->ri_ops.replaceattr;
	/* Only wrap ops rasops actually provided; keep NULLs as NULL. */
	ri->ri_ops.cursor = ri->ri_ops.cursor == NULL ? NULL : viogpu_cursor;
	ri->ri_ops.putchar = ri->ri_ops.putchar == NULL ? NULL :
	    viogpu_putchar;
	ri->ri_ops.copycols = ri->ri_ops.copycols == NULL ? NULL :
	    viogpu_copycols;
	ri->ri_ops.erasecols = ri->ri_ops.erasecols == NULL ? NULL :
	    viogpu_erasecols;
	ri->ri_ops.copyrows = ri->ri_ops.copyrows == NULL ? NULL :
	    viogpu_copyrows;
	ri->ri_ops.eraserows = ri->ri_ops.eraserows == NULL ? NULL :
	    viogpu_eraserows;
	ri->ri_ops.replaceattr = ri->ri_ops.replaceattr == NULL ? NULL :
	    viogpu_replaceattr;
}
    859 
    860 static void
    861 viogpu_cursor(void *c, int on, int row, int col)
    862 {
    863 	struct rasops_info *ri = c;
    864 	struct vcons_screen *vscr = ri->ri_hw;
    865 	struct viogpu_softc *sc = vscr->scr_vd->cookie;
    866 
    867 	sc->ri_cursor(c, on, row, col);
    868 
    869 	softint_schedule(sc->update_soft_ih);
    870 }
    871 
    872 static void
    873 viogpu_putchar(void *c, int row, int col, u_int uc, long attr)
    874 {
    875 	struct rasops_info *ri = c;
    876 	struct vcons_screen *vscr = ri->ri_hw;
    877 	struct viogpu_softc *sc = vscr->scr_vd->cookie;
    878 
    879 	sc->ri_putchar(c, row, col, uc, attr);
    880 
    881 	softint_schedule(sc->update_soft_ih);
    882 }
    883 
    884 static void
    885 viogpu_copycols(void *c, int row, int srccol, int dstcol, int ncols)
    886 {
    887 	struct rasops_info *ri = c;
    888 	struct vcons_screen *vscr = ri->ri_hw;
    889 	struct viogpu_softc *sc = vscr->scr_vd->cookie;
    890 
    891 	sc->ri_copycols(c, row, srccol, dstcol, ncols);
    892 
    893 	softint_schedule(sc->update_soft_ih);
    894 }
    895 
    896 static void
    897 viogpu_erasecols(void *c, int row, int startcol, int ncols, long attr)
    898 {
    899 	struct rasops_info *ri = c;
    900 	struct vcons_screen *vscr = ri->ri_hw;
    901 	struct viogpu_softc *sc = vscr->scr_vd->cookie;
    902 
    903 	sc->ri_erasecols(c, row, startcol, ncols, attr);
    904 
    905 	softint_schedule(sc->update_soft_ih);
    906 }
    907 
    908 static void
    909 viogpu_copyrows(void *c, int srcrow, int dstrow, int nrows)
    910 {
    911 	struct rasops_info *ri = c;
    912 	struct vcons_screen *vscr = ri->ri_hw;
    913 	struct viogpu_softc *sc = vscr->scr_vd->cookie;
    914 
    915 	sc->ri_copyrows(c, srcrow, dstrow, nrows);
    916 
    917 	softint_schedule(sc->update_soft_ih);
    918 }
    919 
    920 static void
    921 viogpu_eraserows(void *c, int row, int nrows, long attr)
    922 {
    923 	struct rasops_info *ri = c;
    924 	struct vcons_screen *vscr = ri->ri_hw;
    925 	struct viogpu_softc *sc = vscr->scr_vd->cookie;
    926 
    927 	sc->ri_eraserows(c, row, nrows, attr);
    928 
    929 	softint_schedule(sc->update_soft_ih);
    930 }
    931 
    932 static void
    933 viogpu_replaceattr(void *c, long oldattr, long newattr)
    934 {
    935 	struct rasops_info *ri = c;
    936 	struct vcons_screen *vscr = ri->ri_hw;
    937 	struct viogpu_softc *sc = vscr->scr_vd->cookie;
    938 
    939 	sc->ri_replaceattr(c, oldattr, newattr);
    940 
    941 	softint_schedule(sc->update_soft_ih);
    942 }
    943