/src/sys/dev/pci/
viornd.c
     88 struct virtqueue *vq = &sc->sc_vq;  local in function:viornd_get
    103 if (virtio_enqueue_prep(vsc, vq, &slot)) {
    106 if (virtio_enqueue_reserve(vsc, vq, slot, 1)) {
    109 virtio_enqueue(vsc, vq, slot, sc->sc_dmamap, 0);
    110 virtio_enqueue_commit(vsc, vq, slot, 1);
    221 viornd_vq_done(struct virtqueue *vq)
    223 struct virtio_softc *vsc = vq->vq_owner;
    229 if (virtio_dequeue(vsc, vq, &slot, &len) != 0) {
    252 virtio_dequeue_commit(vsc, vq, slot);
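Read in order, the viornd.c hits trace the full NetBSD virtio(4) request cycle: take a slot with virtio_enqueue_prep(), reserve descriptors with virtio_enqueue_reserve(), attach the DMA map with virtio_enqueue(), publish with virtio_enqueue_commit(), then pull the completion back with virtio_dequeue()/virtio_dequeue_commit() in the queue's done handler. The sketch below is an illustration built only from the calls visible above, not the driver's actual code; the dmamap argument and the int type of len are assumptions, and kernel headers are omitted.

    /*
     * Sketch only: submit one device-writable buffer, as viornd_get() does.
     * The last argument to virtio_enqueue() is 0 here because the device,
     * not the driver, fills the buffer.
     */
    static int
    example_submit(struct virtio_softc *vsc, struct virtqueue *vq,
        bus_dmamap_t dmamap)
    {
        int r, slot;

        r = virtio_enqueue_prep(vsc, vq, &slot);        /* grab a free slot */
        if (r != 0)
            return r;                                   /* queue is full */
        r = virtio_enqueue_reserve(vsc, vq, slot, 1);   /* one descriptor */
        if (r != 0)
            return r;
        virtio_enqueue(vsc, vq, slot, dmamap, 0);
        virtio_enqueue_commit(vsc, vq, slot, 1);        /* publish and notify */
        return 0;
    }

    /* Completion side, shaped like viornd_vq_done(). */
    static int
    example_vq_done(struct virtqueue *vq)
    {
        struct virtio_softc *vsc = vq->vq_owner;
        int slot, len;

        if (virtio_dequeue(vsc, vq, &slot, &len) != 0)
            return 0;                                   /* nothing completed */
        /* ... consume len bytes from the buffer ... */
        virtio_dequeue_commit(vsc, vq, slot);           /* return the slot */
        return 1;
    }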
viomb.c
    279 struct virtqueue *vq = &sc->sc_vq[VQ_INFLATE];  local in function:inflate
    305 if (virtio_enqueue_prep(vsc, vq, &slot) != 0) {
    311 if (virtio_enqueue_reserve(vsc, vq, slot, 1)) {
    319 virtio_enqueue(vsc, vq, slot, b->bl_dmamap, true);
    320 virtio_enqueue_commit(vsc, vq, slot, true);
    327 inflateq_done(struct virtqueue *vq)
    329 struct virtio_softc *vsc = vq->vq_owner;
    344 struct virtqueue *vq = &sc->sc_vq[VQ_INFLATE];  local in function:inflate_done
    350 r = virtio_dequeue(vsc, vq, &slot, NULL);
    356 virtio_dequeue_commit(vsc, vq, slot)
    390 struct virtqueue *vq = &sc->sc_vq[VQ_DEFLATE];  local in function:deflate
    461 struct virtqueue *vq = &sc->sc_vq[VQ_DEFLATE];  local in function:deflate_done
    [all...]
vioscsi.c
    274 struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];  local in function:vioscsi_scsipi_request
    381 virtio_enqueue_abort(vsc, vq, slot);
    390 error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
    413 virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
    417 virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
    418 virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
    422 virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
    424 virtio_enqueue_commit(vsc, vq, slot, 1);
    452 struct vioscsi_req *vr, struct virtqueue *vq, int slot)
    497 virtio_dequeue_commit(vsc, vq, slot)
    556 struct virtqueue *vq = &sc->sc_vqs[VIOSCSI_VQ_REQUEST];  local in function:vioscsi_req_get
    [all...]
vio9p.c
    221 struct virtqueue *vq = &sc->sc_vq[0];  local in function:vio9p_read
    263 error = virtio_dequeue(vsc, vq, &slot, &len);
    276 virtio_dequeue_commit(vsc, vq, slot);
    317 struct virtqueue *vq = &sc->sc_vq[0];  local in function:vio9p_write
    356 error = virtio_enqueue_prep(vsc, vq, &slot);
    363 error = virtio_enqueue_reserve(vsc, vq, slot,
    374 virtio_enqueue(vsc, vq, slot, sc->sc_dmamap_tx, true);
    378 virtio_enqueue(vsc, vq, slot, sc->sc_dmamap_rx, false);
    379 virtio_enqueue_commit(vsc, vq, slot, true);
    617 vio9p_request_done(struct virtqueue *vq)
    [all...]
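The vio9p_write hits show the two-direction variant of the same cycle: one slot carries both the driver-written request (sc_dmamap_tx, enqueued with true) and the device-written reply (sc_dmamap_rx, enqueued with false). A hedged sketch follows; the reserved segment count is an assumption, since line 363 above is truncated.

    /* Sketch: request and reply share one slot, as in vio9p_write(). */
    static int
    example_transact(struct virtio_softc *vsc, struct virtqueue *vq,
        bus_dmamap_t tx, bus_dmamap_t rx)
    {
        int error, slot;

        error = virtio_enqueue_prep(vsc, vq, &slot);
        if (error != 0)
            return error;
        error = virtio_enqueue_reserve(vsc, vq, slot,
            tx->dm_nsegs + rx->dm_nsegs);               /* assumed count */
        if (error != 0)
            return error;
        virtio_enqueue(vsc, vq, slot, tx, true);        /* driver -> device */
        virtio_enqueue(vsc, vq, slot, rx, false);       /* device -> driver */
        virtio_enqueue_commit(vsc, vq, slot, true);
        return 0;
    }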
viogpu.c
     78 static int viogpu_vq_done(struct virtqueue *vq);
    486 struct virtqueue *vq = &sc->sc_vqs[VQCTRL];  local in function:viogpu_cmd_req
    505 error = virtio_enqueue_prep(vsc, vq, &slot);
    507 panic("%s: control vq busy", device_xname(sc->sc_dev));
    509 error = virtio_enqueue_reserve(vsc, vq, slot,
    512 panic("%s: control vq busy", device_xname(sc->sc_dev));
    516 virtio_enqueue_p(vsc, vq, slot, sc->sc_dma_map, 0, cmd_size, true);
    520 virtio_enqueue_p(vsc, vq, slot, sc->sc_dma_map, cmd_size, ret_size,
    523 virtio_enqueue_commit(vsc, vq, slot, true);
    533 viogpu_vq_done(struct virtqueue *vq)
    [all...]
virtio_pci.c
    767 struct virtqueue *vq = &sc->sc_vqs[idx];  local in function:virtio_pci_setup_queue_10
    770 KASSERT(vq->vq_index == idx);
    772 bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
    785 VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
    787 VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
    790 vq->vq_notify_off = bus_space_read_2(iot, ioh,
    974 struct virtqueue *vq;  local in function:virtio_pci_establish_msix_interrupts
    999 vq = &sc->sc_vqs[qid];
   1001 snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
   1011 vq->vq_intrhand, vq->vq_intrhand_arg, intr_xname)
    [all...]
ld_virtio.c
    476 struct virtqueue *vq = &sc->sc_vq;  local in function:ld_virtio_info
    495 ld_virtio_vq_done(vq);
    504 r = virtio_enqueue_prep(vsc, vq, &slot);
    517 virtio_enqueue_abort(vsc, vq, slot);
    523 r = virtio_enqueue_reserve(vsc, vq, slot, vr->vr_payload->dm_nsegs +
    546 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
    549 virtio_enqueue(vsc, vq, slot, vr->vr_payload, false);
    550 virtio_enqueue_p(vsc, vq, slot, vr->vr_cmdsts,
    554 virtio_enqueue_commit(vsc, vq, slot, true);
    561 ld_virtio_vq_done(vq);
    601 struct virtqueue *vq = &sc->sc_vq;  local in function:ld_virtio_start
    760 struct virtqueue *vq = &sc->sc_vq;  local in function:ld_virtio_dump
    896 struct virtqueue *vq = &sc->sc_vq;  local in function:ld_virtio_flush
   1039 struct virtqueue * const vq = &sc->sc_vq;  local in function:ld_virtio_discard
    [all...]
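ld_virtio layers three regions onto a single slot: the request header and the status byte both live in vr_cmdsts, placed at different offsets with virtio_enqueue_p(), with the payload map in between. A hedged sketch of that layout; the offsets, lengths, and segment arithmetic are illustrative, not the driver's real structure sizes.

    /* Sketch of a header / payload / status request, as in ld_virtio_start(). */
    static int
    example_blk_submit(struct virtio_softc *vsc, struct virtqueue *vq,
        bus_dmamap_t cmdsts, bus_dmamap_t payload,
        bus_size_t hdrlen, bus_size_t stslen, bool iswrite)
    {
        int r, slot;

        r = virtio_enqueue_prep(vsc, vq, &slot);
        if (r != 0)
            return r;
        r = virtio_enqueue_reserve(vsc, vq, slot, payload->dm_nsegs + 2);
        if (r != 0)
            return r;
        /* request header: driver -> device */
        virtio_enqueue_p(vsc, vq, slot, cmdsts, 0, hdrlen, true);
        /* data: direction depends on whether this is a read or a write */
        virtio_enqueue(vsc, vq, slot, payload, iswrite);
        /* status byte: device -> driver */
        virtio_enqueue_p(vsc, vq, slot, cmdsts, hdrlen, stslen, false);
        virtio_enqueue_commit(vsc, vq, slot, true);
        return 0;
    }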
virtio.c
    117 struct virtqueue *vq = &sc->sc_vqs[i];  local in function:virtio_reinit_start
    118 n = sc->sc_ops->read_queue_size(sc, vq->vq_index);
    119 if (n == 0) /* vq disappeared */
    121 if (n != vq->vq_num) {
    122 panic("%s: virtqueue size changed, vq index %d\n",
    124 vq->vq_index);
    126 virtio_reset_vq(sc, vq);
    127 sc->sc_ops->setup_queue(sc, vq->vq_index,
    128 vq->vq_dmamap->dm_segs[0].ds_addr);
    462 /* set to vq->vq_intrhand in virtio_init_vq_vqdone() */
    466 struct virtqueue *vq = xvq;  local in function:virtio_vq_done
    474 struct virtqueue *vq;  local in function:virtio_vq_intr
    [all...]
if_vioif.c
    440 vioif_notify(struct virtio_softc *vsc, struct virtqueue *vq)
    443 virtio_enqueue_commit(vsc, vq, -1, true);
    918 /* no ctrl vq; always promisc and allmulti */
   1141 * ctrlq_cmd: command to be sent via ctrl vq (WRITE)
   1142 * ctrlq_status: return value for a command via ctrl vq (READ)
   1315 /* control vq class & command */
   1328 /* control vq rx mode command parameter */
   1342 /* control vq MAC filter table for unicast */
   1351 /* control vq MAC filter table for multicast */
   1359 /* control vq MAC address set command */
   1460 struct virtqueue *vq;  local in function:vioif_netqueue_init
   1746 struct virtqueue *vq = netq->netq_vq;  local in function:vioif_populate_rx_mbufs_locked
   1813 struct virtqueue *vq = netq->netq_vq;  local in function:vioif_rx_deq_locked
   1891 struct virtqueue *vq = netq->netq_vq;  local in function:vioif_rx_handle_locked
   1924 struct virtqueue *vq = netq->netq_vq;  local in function:vioif_rx_intr
   1953 struct virtqueue *vq = netq->netq_vq;  local in function:vioif_rx_handle
   1984 struct virtqueue *vq = netq->netq_vq;  local in function:vioif_send_common_locked
   2077 struct virtqueue *vq = netq->netq_vq;  local in function:vioif_tx_deq_locked
   2189 struct virtqueue *vq = netq->netq_vq;  local in function:vioif_tx_handle_locked
   2234 struct virtqueue *vq = netq->netq_vq;  local in function:vioif_tx_intr
   2264 struct virtqueue *vq = netq->netq_vq;  local in function:vioif_tx_handle
   2364 struct virtqueue *vq = ctrlq->ctrlq_vq;  local in function:vioif_ctrl_send_command
   2429 struct virtqueue *vq = ctrlq->ctrlq_vq;  local in function:vioif_ctrl_intr
    [all...]
/src/sys/dev/virtio/
virtio_mmio.c
    211 struct virtqueue *vq;  local in function:virtio_mmio_v2_setup_queue
    220 vq = &vsc->sc_vqs[idx];
    221 KASSERT(vq->vq_index == idx);
    228 addr + vq->vq_availoffset);
    230 addr + vq->vq_usedoffset);
viocon.c
     44 #define virtio_notify(vsc, vq) virtio_enqueue_commit(vsc, vq, -1, true)
    140 int viocon_tx_drain(struct viocon_port *, struct virtqueue *vq);
    328 viocon_tx_drain(struct viocon_port *vp, struct virtqueue *vq)
    330 struct virtio_softc *vsc = vq->vq_owner;
    334 while (virtio_dequeue(vsc, vq, &slot, &len) == 0) {
    338 virtio_dequeue_commit(vsc, vq, slot);
    345 viocon_tx_intr(struct virtqueue *vq)
    347 struct virtio_softc *vsc = vq->vq_owner;
    350 int portidx = (vq->vq_index - 1) / 2
    367 struct virtqueue *vq = vp->vp_rx;  local in function:viocon_rx_fill
    402 struct virtqueue *vq = vp->vp_rx;  local in function:viocon_rx_soft
    428 struct virtqueue *vq;  local in function:vioconstart
    [all...]
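viocon_tx_drain() condenses the completion side to a loop: dequeue until the queue is empty and hand each slot back. A sketch of that shape; the completion length is not needed here, so NULL is passed, as viomb.c does at its line 350 above.

    /* Sketch of a drain loop over completed slots, as in viocon_tx_drain(). */
    static int
    example_drain(struct virtqueue *vq)
    {
        struct virtio_softc *vsc = vq->vq_owner;
        int ndone = 0, slot;

        while (virtio_dequeue(vsc, vq, &slot, NULL) == 0) {
            /* ... release whatever buffer was attached to this slot ... */
            virtio_dequeue_commit(vsc, vq, slot);
            ndone++;
        }
        return ndone;
    }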
/src/bin/ksh/
c_ksh.c
   1309 struct tbl *vq, *voptarg;  local in function:c_getopts
   1388 vq = global(var);
   1390 if (!setstr(vq, buf, KSH_RETURN_ERROR))
expr.c
    592 struct tbl *vq;  local in function:intvar
    599 vq = tempvar();
    600 if (setint_v(vq, vp) == NULL) {
    605 v_evaluate(vq, str_val(vp), KSH_UNWIND_ERROR);
    609 return vq;
var.c
     72 struct tbl *vp, **vpp = l->vars.tbls, *vq;
     78 if ((vq = global(vp->name))->flag & ISSET)
     79 setspec(vq);
     81 unsetspec(vq);
    278 struct tbl *vq = (struct tbl *) 0;  local in function:local
    280 while ((ll = ll->next) && !(vq = mytsearch(&ll->vars, n, h)))
    282 if (vq) {
    283 vp->flag |= vq->flag & (EXPORT|INTEGER|RDONLY
    286 if (vq->flag & INTEGER)
    287 vp->type = vq->type
   1238 struct tbl *vp, *vq;  local in function:set_array
    [all...]
/src/sys/external/bsd/drm2/dist/drm/virtio/
virtgpu_drv.h
    147 struct virtqueue *vq;  member in struct:virtio_gpu_queue
    330 void virtio_gpu_ctrl_ack(struct virtqueue *vq);
    331 void virtio_gpu_cursor_ack(struct virtqueue *vq);
    332 void virtio_gpu_fence_ack(struct virtqueue *vq);
virtgpu_vq.c
     59 void virtio_gpu_ctrl_ack(struct virtqueue *vq)
     61 struct drm_device *dev = vq->vdev->priv;
     67 void virtio_gpu_cursor_ack(struct virtqueue *vq)
     69 struct drm_device *dev = vq->vdev->priv;
    178 static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
    184 while ((vbuf = virtqueue_get_buf(vq, &len))) {
    205 virtqueue_disable_cb(vgdev->ctrlq.vq);
    206 reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
    208 } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
    214 trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp)
    321 struct virtqueue *vq = vgdev->ctrlq.vq;  local in function:virtio_gpu_queue_ctrl_buffer_locked
    366 struct virtqueue *vq = vgdev->ctrlq.vq;  local in function:virtio_gpu_queue_fenced_ctrl_buffer
    449 struct virtqueue *vq = vgdev->cursorq.vq;  local in function:virtio_gpu_queue_cursor
    [all...]
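The virtgpu files use the Linux-side virtqueue API rather than NetBSD's virtio(4) wrappers, since they are shared with the upstream DRM driver. The reclaim pattern traced by the hits at lines 184-208, roughly sketched; the vbuf type and its list handling are elided as assumptions.

    /* Sketch of the disable_cb / get_buf / enable_cb reclaim loop above. */
    static void
    example_reclaim(struct virtqueue *vq)
    {
        void *vbuf;
        unsigned int len;

        do {
            virtqueue_disable_cb(vq);        /* suppress callbacks while draining */
            while ((vbuf = virtqueue_get_buf(vq, &len)) != NULL) {
                /* ... move vbuf onto a reclaim list for later freeing ... */
            }
        } while (!virtqueue_enable_cb(vq));  /* re-arm; loop if buffers raced in */
    }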
/src/sys/fs/nfs/client/
nfs_clvfsops.c
   1792 struct vfsquery vq;  local in function:nfs_sysctl
   1795 bzero(&vq, sizeof(vq));
   1819 vq.vq_flags |= VQ_NOTRESP;
   1824 vq.vq_flags |= VQ_NOTRESPLOCK;
   1826 error = SYSCTL_OUT(req, &vq, sizeof(vq));
/src/sys/miscfs/specfs/
spec_vnops.c
    497 vnode_t *vp, *vq;  local in function:spec_node_lookup_by_mount
    500 for (i = 0, vq = NULL; i < SPECHSZ && vq == NULL; i++) {
    504 vq = vp->v_specnode->sn_dev->sd_bdevvp;
    505 if (vq != NULL &&
    506 vq->v_specnode->sn_dev->sd_mountpoint == mp)
    508 vq = NULL;
    511 if (vq == NULL) {
    515 mutex_enter(vq->v_interlock);
    517 error = vcache_vget(vq);
    [all...]
/src/sys/kern/
vfs_mount.c
   1450 vnode_t *vq;  local in function:vfs_mountedon
   1457 if (spec_node_lookup_by_dev(vp->v_type, vp->v_rdev, VDEAD_NOWAIT, &vq)
   1459 if (spec_node_getmountedfs(vq) != NULL)
   1461 vrele(vq);
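Outside the virtio code, vq is simply a second vnode pointer. The vfs_mountedon() hits reduce to: find another vnode for the same device and check whether a file system is already mounted on it. A hedged sketch of that check; the EBUSY return and the exact lookup condition are assumptions filled in around the truncated line 1457.

    /* Sketch of the "is this device already mounted?" check traced above. */
    static int
    example_mountedon(vnode_t *vp)
    {
        vnode_t *vq;
        int error = 0;

        if (spec_node_lookup_by_dev(vp->v_type, vp->v_rdev,
            VDEAD_NOWAIT, &vq) == 0) {
            if (spec_node_getmountedfs(vq) != NULL)
                error = EBUSY;          /* a file system is mounted here */
            vrele(vq);                  /* drop the reference from the lookup */
        }
        return error;
    }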
vfs_vnode.c
   1266 vnode_t *vq;  local in function:vrevoke
   1287 while (spec_node_lookup_by_dev(type, dev, VDEAD_NOWAIT, &vq)
   1289 mp = vrevoke_suspend_next(mp, vq->v_mount);
   1290 vgone(vq);