/* $NetBSD: vbva_base.c,v 1.2 2021/12/18 23:45:44 riastradh Exp $ */

// SPDX-License-Identifier: MIT
/* Copyright (C) 2006-2017 Oracle Corporation */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vbva_base.c,v 1.2 2021/12/18 23:45:44 riastradh Exp $");

#include <linux/vbox_err.h>
#include "vbox_drv.h"
#include "vboxvideo_guest.h"
#include "hgsmi_channels.h"

/*
 * There is a hardware ring buffer in the graphics device video RAM, formerly
 * in the VBox VMMDev PCI memory space.
 * All graphics commands go there serialized by vbva_buffer_begin_update.
 * and vbva_buffer_end_update.
 *
 * free_offset is writing position. data_offset is reading position.
 * free_offset == data_offset means buffer is empty.
 * There must be always gap between data_offset and free_offset when data
 * are in the buffer.
 * Guest only changes free_offset, host changes data_offset.
 */

/*
 * Return the number of bytes the guest may still write into the ring
 * buffer.  When the read position is ahead of the write position the
 * free span is simply the (positive) difference; otherwise the free
 * span wraps around the end of the buffer, so add data_len to the
 * (non-positive) difference.  Note that when the buffer is empty
 * (diff == 0) this returns data_len, the full buffer size.
 */
static u32 vbva_buffer_available(const struct vbva_buffer *vbva)
{
	s32 diff = vbva->data_offset - vbva->free_offset;

	return diff > 0 ? diff : vbva->data_len + diff;
}

/*
 * Copy len bytes from p into the ring buffer starting at the given
 * offset, splitting the copy in two when it would run past the end of
 * the data area (the second part continues at data[0]).  The caller is
 * responsible for ensuring there is enough free space; this function
 * does not check against data_offset.
 */
static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx,
				      const void *p, u32 len, u32 offset)
{
	struct vbva_buffer *vbva = vbva_ctx->vbva;
	u32 bytes_till_boundary = vbva->data_len - offset;
	u8 *dst = &vbva->data[offset];
	s32 diff = len - bytes_till_boundary;

	if (diff <= 0) {
		/* Chunk will not cross buffer boundary. */
		memcpy(dst, p, len);
	} else {
		/* Chunk crosses buffer boundary. */
		memcpy(dst, p, bytes_till_boundary);
		memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff);
	}
}

/*
 * Ask the host to process (consume) the data currently in the ring
 * buffer by submitting a VBVA_FLUSH command over HGSMI.  Allocation
 * failure is silently ignored: flushing is best-effort and the caller
 * re-checks the available space afterwards anyway.
 */
static void vbva_buffer_flush(struct gen_pool *ctx)
{
	struct vbva_flush *p;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH);
	if (!p)
		return;

	p->reserved = 0;

	hgsmi_buffer_submit(ctx, p);
	hgsmi_buffer_free(ctx, p);
}

/*
 * Append len bytes of command data to the current (partial) record in
 * the ring buffer, advancing the guest write position free_offset.
 *
 * Requires an enabled buffer and an open record (see
 * vbva_buffer_begin_update); returns false immediately otherwise, or
 * if a previous write already overflowed the buffer.
 *
 * When the data does not fit, the host is asked to flush and the free
 * space is re-read.  If it still does not fit, the data is written in
 * chunks of available - partial_write_tresh bytes, preserving the
 * mandatory gap between free_offset and data_offset (see the file
 * header comment).  If even the threshold's worth of space is gone the
 * buffer_overflow flag is latched and false is returned.
 *
 * Returns true when all len bytes have been placed in the buffer.
 */
bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		const void *p, u32 len)
{
	struct vbva_record *record;
	struct vbva_buffer *vbva;
	u32 available;

	vbva = vbva_ctx->vbva;
	record = vbva_ctx->record;

	if (!vbva || vbva_ctx->buffer_overflow ||
	    !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL))
		return false;

	available = vbva_buffer_available(vbva);

	while (len > 0) {
		u32 chunk = len;

		if (chunk >= available) {
			/* Not enough room: let the host drain the buffer. */
			vbva_buffer_flush(ctx);
			available = vbva_buffer_available(vbva);
		}

		if (chunk >= available) {
			if (WARN_ON(available <= vbva->partial_write_tresh)) {
				vbva_ctx->buffer_overflow = true;
				return false;
			}
			/* Write only what fits while keeping the gap. */
			chunk = available - vbva->partial_write_tresh;
		}

		vbva_buffer_place_data_at(vbva_ctx, p, chunk,
					  vbva->free_offset);

		/* Publish the new write position only after the data is in. */
		vbva->free_offset = (vbva->free_offset + chunk) %
				    vbva->data_len;
		/* Record length lives in the low bits alongside the flags. */
		record->len_and_flags += chunk;
		available -= chunk;
		len -= chunk;
		/* void * arithmetic: GCC extension, treated as byte pointer. */
		p += chunk;
	}

	return true;
}

/*
 * Send a VBVA_ENABLE command telling the host to start or stop using
 * the ring buffer at vbva_ctx->buffer_offset.  For screen >= 0 the
 * extended form is used (VBVA_F_EXTENDED | VBVA_F_ABSOFFSET) with the
 * screen id filled in.  result is preset to VERR_NOT_SUPPORTED so an
 * old host that ignores the field reads back as failure.
 *
 * Returns true on success; a disable request is always considered
 * successful, an enable request only if the host wrote back a
 * non-negative result code.
 */
static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx,
			     struct gen_pool *ctx, s32 screen, bool enable)
{
	struct vbva_enable_ex *p;
	bool ret;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE);
	if (!p)
		return false;

	p->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
	p->base.offset = vbva_ctx->buffer_offset;
	p->base.result = VERR_NOT_SUPPORTED;
	if (screen >= 0) {
		p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
		p->screen_id = screen;
	}

	hgsmi_buffer_submit(ctx, p);

	if (enable)
		ret = p->base.result >= 0;
	else
		ret = true;

	hgsmi_buffer_free(ctx, p);

	return ret;
}

/*
 * Initialize the given ring buffer structure (data area is everything
 * after the vbva_buffer header within buffer_length bytes), attach it
 * to the context and ask the host to enable it.  On host refusal the
 * buffer is disabled again and false is returned.
 */
bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		 struct vbva_buffer *vbva, s32 screen)
{
	bool ret = false;

	memset(vbva, 0, sizeof(*vbva));
	/* Minimum gap kept between write and read offsets; see vbva_write. */
	vbva->partial_write_tresh = 256;
	vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer);
	vbva_ctx->vbva = vbva;

	ret = vbva_inform_host(vbva_ctx, ctx, screen, true);
	if (!ret)
		vbva_disable(vbva_ctx, ctx, screen);

	return ret;
}

/*
 * Detach the ring buffer from the context, clear any pending record
 * and overflow state, and tell the host to stop using it.
 */
void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		  s32 screen)
{
	vbva_ctx->buffer_overflow = false;
	vbva_ctx->record = NULL;
	vbva_ctx->vbva = NULL;

	vbva_inform_host(vbva_ctx, ctx, screen, false);
}

/*
 * Open a new record in the records queue and mark it partial, so that
 * subsequent vbva_write calls append to it.  Fails when the buffer is
 * absent, the host has not signalled VBVA_F_MODE_ENABLED, or the
 * records queue is full even after asking the host to flush.
 *
 * Must be balanced by vbva_buffer_end_update; nesting is a bug
 * (WARN_ON fires if a record is already open).
 */
bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
			      struct gen_pool *ctx)
{
	struct vbva_record *record;
	u32 next;

	if (!vbva_ctx->vbva ||
	    !(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED))
		return false;

	WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record);

	next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS;

	/* Flush if all slots in the records queue are used */
	if (next == vbva_ctx->vbva->record_first_index)
		vbva_buffer_flush(ctx);

	/* If even after flush there is no place then fail the request */
	if (next == vbva_ctx->vbva->record_first_index)
		return false;

	record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index];
	record->len_and_flags = VBVA_F_RECORD_PARTIAL;
	vbva_ctx->vbva->record_free_index = next;
	/* Remember which record we are using. */
	vbva_ctx->record = record;

	return true;
}

/*
 * Close the record opened by vbva_buffer_begin_update by clearing its
 * VBVA_F_RECORD_PARTIAL flag, which makes it visible to the host as a
 * complete command.  Also resets the overflow latch so the next
 * update may try again.
 */
void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx)
{
	struct vbva_record *record = vbva_ctx->record;

	WARN_ON(!vbva_ctx->vbva || !record ||
		!(record->len_and_flags & VBVA_F_RECORD_PARTIAL));

	/* Mark the record completed. */
	record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL;

	vbva_ctx->buffer_overflow = false;
	vbva_ctx->record = NULL;
}

/*
 * Record where in video RAM the ring buffer lives (offset) and how big
 * it is (length, including the vbva_buffer header).  Used later by
 * vbva_enable / vbva_inform_host.
 */
void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
			       u32 buffer_offset, u32 buffer_length)
{
	vbva_ctx->buffer_offset = buffer_offset;
	vbva_ctx->buffer_length = buffer_length;
}