1 1.7 riastrad /* $NetBSD: vmwgfx_cmdbuf.c,v 1.7 2022/10/25 23:35:29 riastradh Exp $ */ 2 1.1 riastrad 3 1.3 riastrad // SPDX-License-Identifier: GPL-2.0 OR MIT 4 1.1 riastrad /************************************************************************** 5 1.1 riastrad * 6 1.3 riastrad * Copyright 2015 VMware, Inc., Palo Alto, CA., USA 7 1.1 riastrad * 8 1.1 riastrad * Permission is hereby granted, free of charge, to any person obtaining a 9 1.1 riastrad * copy of this software and associated documentation files (the 10 1.1 riastrad * "Software"), to deal in the Software without restriction, including 11 1.1 riastrad * without limitation the rights to use, copy, modify, merge, publish, 12 1.1 riastrad * distribute, sub license, and/or sell copies of the Software, and to 13 1.1 riastrad * permit persons to whom the Software is furnished to do so, subject to 14 1.1 riastrad * the following conditions: 15 1.1 riastrad * 16 1.1 riastrad * The above copyright notice and this permission notice (including the 17 1.1 riastrad * next paragraph) shall be included in all copies or substantial portions 18 1.1 riastrad * of the Software. 19 1.1 riastrad * 20 1.1 riastrad * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 1.1 riastrad * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22 1.1 riastrad * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 23 1.1 riastrad * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 24 1.1 riastrad * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 25 1.1 riastrad * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 26 1.1 riastrad * USE OR OTHER DEALINGS IN THE SOFTWARE. 27 1.1 riastrad * 28 1.1 riastrad **************************************************************************/ 29 1.1 riastrad 30 1.1 riastrad #include <sys/cdefs.h> 31 1.7 riastrad __KERNEL_RCSID(0, "$NetBSD: vmwgfx_cmdbuf.c,v 1.7 2022/10/25 23:35:29 riastradh Exp $"); 32 1.1 riastrad 33 1.3 riastrad #include <linux/dmapool.h> 34 1.3 riastrad #include <linux/pci.h> 35 1.3 riastrad 36 1.3 riastrad #include <drm/ttm/ttm_bo_api.h> 37 1.3 riastrad 38 1.1 riastrad #include "vmwgfx_drv.h" 39 1.1 riastrad 40 1.4 riastrad #include <linux/nbsd-namespace.h> 41 1.4 riastrad 42 1.1 riastrad /* 43 1.1 riastrad * Size of inline command buffers. Try to make sure that a page size is a 44 1.1 riastrad * multiple of the DMA pool allocation size. 45 1.1 riastrad */ 46 1.1 riastrad #define VMW_CMDBUF_INLINE_ALIGN 64 47 1.1 riastrad #define VMW_CMDBUF_INLINE_SIZE \ 48 1.1 riastrad (1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN)) 49 1.1 riastrad 50 1.1 riastrad /** 51 1.1 riastrad * struct vmw_cmdbuf_context - Command buffer context queues 52 1.1 riastrad * 53 1.1 riastrad * @submitted: List of command buffers that have been submitted to the 54 1.1 riastrad * manager but not yet submitted to hardware. 55 1.1 riastrad * @hw_submitted: List of command buffers submitted to hardware. 56 1.1 riastrad * @preempted: List of preempted command buffers. 57 1.1 riastrad * @num_hw_submitted: Number of buffers currently being processed by hardware 58 1.1 riastrad */ 59 1.1 riastrad struct vmw_cmdbuf_context { 60 1.1 riastrad struct list_head submitted; 61 1.1 riastrad struct list_head hw_submitted; 62 1.1 riastrad struct list_head preempted; 63 1.1 riastrad unsigned num_hw_submitted; 64 1.3 riastrad bool block_submission; 65 1.1 riastrad }; 66 1.1 riastrad 67 1.1 riastrad /** 68 1.1 riastrad * struct vmw_cmdbuf_man: - Command buffer manager 69 1.1 riastrad * 70 1.1 riastrad * @cur_mutex: Mutex protecting the command buffer used for incremental small 71 1.1 riastrad * kernel command submissions, @cur. 72 1.1 riastrad * @space_mutex: Mutex to protect against starvation when we allocate 73 1.1 riastrad * main pool buffer space. 74 1.3 riastrad * @error_mutex: Mutex to serialize the work queue error handling. 75 1.3 riastrad * Note this is not needed if the same workqueue handler 76 1.3 riastrad * can't race with itself... 77 1.1 riastrad * @work: A struct work_struct implementeing command buffer error handling. 78 1.1 riastrad * Immutable. 79 1.1 riastrad * @dev_priv: Pointer to the device private struct. Immutable. 80 1.1 riastrad * @ctx: Array of command buffer context queues. The queues and the context 81 1.1 riastrad * data is protected by @lock. 82 1.1 riastrad * @error: List of command buffers that have caused device errors. 83 1.1 riastrad * Protected by @lock. 84 1.1 riastrad * @mm: Range manager for the command buffer space. Manager allocations and 85 1.1 riastrad * frees are protected by @lock. 86 1.1 riastrad * @cmd_space: Buffer object for the command buffer space, unless we were 87 1.1 riastrad * able to make a contigous coherent DMA memory allocation, @handle. Immutable. 88 1.1 riastrad * @map_obj: Mapping state for @cmd_space. Immutable. 89 1.1 riastrad * @map: Pointer to command buffer space. May be a mapped buffer object or 90 1.1 riastrad * a contigous coherent DMA memory allocation. Immutable. 91 1.1 riastrad * @cur: Command buffer for small kernel command submissions. Protected by 92 1.1 riastrad * the @cur_mutex. 93 1.1 riastrad * @cur_pos: Space already used in @cur. Protected by @cur_mutex. 94 1.1 riastrad * @default_size: Default size for the @cur command buffer. Immutable. 95 1.1 riastrad * @max_hw_submitted: Max number of in-flight command buffers the device can 96 1.1 riastrad * handle. Immutable. 97 1.1 riastrad * @lock: Spinlock protecting command submission queues. 98 1.1 riastrad * @header: Pool of DMA memory for device command buffer headers. 99 1.1 riastrad * Internal protection. 100 1.1 riastrad * @dheaders: Pool of DMA memory for device command buffer headers with trailing 101 1.1 riastrad * space for inline data. Internal protection. 102 1.1 riastrad * @alloc_queue: Wait queue for processes waiting to allocate command buffer 103 1.1 riastrad * space. 104 1.1 riastrad * @idle_queue: Wait queue for processes waiting for command buffer idle. 105 1.1 riastrad * @irq_on: Whether the process function has requested irq to be turned on. 106 1.1 riastrad * Protected by @lock. 107 1.1 riastrad * @using_mob: Whether the command buffer space is a MOB or a contigous DMA 108 1.1 riastrad * allocation. Immutable. 109 1.1 riastrad * @has_pool: Has a large pool of DMA memory which allows larger allocations. 110 1.1 riastrad * Typically this is false only during bootstrap. 111 1.1 riastrad * @handle: DMA address handle for the command buffer space if @using_mob is 112 1.1 riastrad * false. Immutable. 113 1.1 riastrad * @size: The size of the command buffer space. Immutable. 114 1.3 riastrad * @num_contexts: Number of contexts actually enabled. 115 1.1 riastrad */ 116 1.1 riastrad struct vmw_cmdbuf_man { 117 1.1 riastrad struct mutex cur_mutex; 118 1.1 riastrad struct mutex space_mutex; 119 1.3 riastrad struct mutex error_mutex; 120 1.1 riastrad struct work_struct work; 121 1.1 riastrad struct vmw_private *dev_priv; 122 1.1 riastrad struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX]; 123 1.1 riastrad struct list_head error; 124 1.1 riastrad struct drm_mm mm; 125 1.1 riastrad struct ttm_buffer_object *cmd_space; 126 1.1 riastrad struct ttm_bo_kmap_obj map_obj; 127 1.1 riastrad u8 *map; 128 1.1 riastrad struct vmw_cmdbuf_header *cur; 129 1.1 riastrad size_t cur_pos; 130 1.1 riastrad size_t default_size; 131 1.1 riastrad unsigned max_hw_submitted; 132 1.1 riastrad spinlock_t lock; 133 1.1 riastrad struct dma_pool *headers; 134 1.1 riastrad struct dma_pool *dheaders; 135 1.6 riastrad drm_waitqueue_t alloc_queue; 136 1.6 riastrad drm_waitqueue_t idle_queue; 137 1.1 riastrad bool irq_on; 138 1.1 riastrad bool using_mob; 139 1.1 riastrad bool has_pool; 140 1.5 riastrad #ifdef __NetBSD__ 141 1.5 riastrad bus_dmamap_t dmamap; 142 1.5 riastrad bus_dma_segment_t dmaseg; 143 1.5 riastrad #endif 144 1.1 riastrad dma_addr_t handle; 145 1.1 riastrad size_t size; 146 1.3 riastrad u32 num_contexts; 147 1.1 riastrad }; 148 1.1 riastrad 149 1.1 riastrad /** 150 1.1 riastrad * struct vmw_cmdbuf_header - Command buffer metadata 151 1.1 riastrad * 152 1.1 riastrad * @man: The command buffer manager. 153 1.1 riastrad * @cb_header: Device command buffer header, allocated from a DMA pool. 154 1.1 riastrad * @cb_context: The device command buffer context. 155 1.1 riastrad * @list: List head for attaching to the manager lists. 156 1.1 riastrad * @node: The range manager node. 157 1.1 riastrad * @handle. The DMA address of @cb_header. Handed to the device on command 158 1.1 riastrad * buffer submission. 159 1.1 riastrad * @cmd: Pointer to the command buffer space of this buffer. 160 1.1 riastrad * @size: Size of the command buffer space of this buffer. 161 1.1 riastrad * @reserved: Reserved space of this buffer. 162 1.1 riastrad * @inline_space: Whether inline command buffer space is used. 163 1.1 riastrad */ 164 1.1 riastrad struct vmw_cmdbuf_header { 165 1.1 riastrad struct vmw_cmdbuf_man *man; 166 1.1 riastrad SVGACBHeader *cb_header; 167 1.1 riastrad SVGACBContext cb_context; 168 1.1 riastrad struct list_head list; 169 1.1 riastrad struct drm_mm_node node; 170 1.1 riastrad dma_addr_t handle; 171 1.1 riastrad u8 *cmd; 172 1.1 riastrad size_t size; 173 1.1 riastrad size_t reserved; 174 1.1 riastrad bool inline_space; 175 1.1 riastrad }; 176 1.1 riastrad 177 1.1 riastrad /** 178 1.1 riastrad * struct vmw_cmdbuf_dheader - Device command buffer header with inline 179 1.1 riastrad * command buffer space. 180 1.1 riastrad * 181 1.1 riastrad * @cb_header: Device command buffer header. 182 1.1 riastrad * @cmd: Inline command buffer space. 183 1.1 riastrad */ 184 1.1 riastrad struct vmw_cmdbuf_dheader { 185 1.1 riastrad SVGACBHeader cb_header; 186 1.1 riastrad u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN); 187 1.1 riastrad }; 188 1.1 riastrad 189 1.1 riastrad /** 190 1.1 riastrad * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata 191 1.1 riastrad * 192 1.1 riastrad * @page_size: Size of requested command buffer space in pages. 193 1.1 riastrad * @node: Pointer to the range manager node. 194 1.1 riastrad * @done: True if this allocation has succeeded. 195 1.1 riastrad */ 196 1.1 riastrad struct vmw_cmdbuf_alloc_info { 197 1.1 riastrad size_t page_size; 198 1.1 riastrad struct drm_mm_node *node; 199 1.1 riastrad bool done; 200 1.1 riastrad }; 201 1.1 riastrad 202 1.1 riastrad /* Loop over each context in the command buffer manager. */ 203 1.3 riastrad #define for_each_cmdbuf_ctx(_man, _i, _ctx) \ 204 1.3 riastrad for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \ 205 1.1 riastrad ++(_i), ++(_ctx)) 206 1.1 riastrad 207 1.3 riastrad static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context, 208 1.3 riastrad bool enable); 209 1.3 riastrad static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context); 210 1.1 riastrad 211 1.1 riastrad /** 212 1.1 riastrad * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex. 213 1.1 riastrad * 214 1.1 riastrad * @man: The range manager. 215 1.1 riastrad * @interruptible: Whether to wait interruptible when locking. 216 1.1 riastrad */ 217 1.1 riastrad static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible) 218 1.1 riastrad { 219 1.1 riastrad if (interruptible) { 220 1.1 riastrad if (mutex_lock_interruptible(&man->cur_mutex)) 221 1.1 riastrad return -ERESTARTSYS; 222 1.1 riastrad } else { 223 1.1 riastrad mutex_lock(&man->cur_mutex); 224 1.1 riastrad } 225 1.1 riastrad 226 1.1 riastrad return 0; 227 1.1 riastrad } 228 1.1 riastrad 229 1.1 riastrad /** 230 1.1 riastrad * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex. 231 1.1 riastrad * 232 1.1 riastrad * @man: The range manager. 233 1.1 riastrad */ 234 1.1 riastrad static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man) 235 1.1 riastrad { 236 1.1 riastrad mutex_unlock(&man->cur_mutex); 237 1.1 riastrad } 238 1.1 riastrad 239 1.1 riastrad /** 240 1.1 riastrad * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has 241 1.1 riastrad * been used for the device context with inline command buffers. 242 1.1 riastrad * Need not be called locked. 243 1.1 riastrad * 244 1.1 riastrad * @header: Pointer to the header to free. 245 1.1 riastrad */ 246 1.1 riastrad static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header) 247 1.1 riastrad { 248 1.1 riastrad struct vmw_cmdbuf_dheader *dheader; 249 1.1 riastrad 250 1.1 riastrad if (WARN_ON_ONCE(!header->inline_space)) 251 1.1 riastrad return; 252 1.1 riastrad 253 1.1 riastrad dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader, 254 1.1 riastrad cb_header); 255 1.1 riastrad dma_pool_free(header->man->dheaders, dheader, header->handle); 256 1.1 riastrad kfree(header); 257 1.1 riastrad } 258 1.1 riastrad 259 1.1 riastrad /** 260 1.1 riastrad * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its 261 1.1 riastrad * associated structures. 262 1.1 riastrad * 263 1.1 riastrad * header: Pointer to the header to free. 264 1.1 riastrad * 265 1.1 riastrad * For internal use. Must be called with man::lock held. 266 1.1 riastrad */ 267 1.1 riastrad static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header) 268 1.1 riastrad { 269 1.1 riastrad struct vmw_cmdbuf_man *man = header->man; 270 1.1 riastrad 271 1.1 riastrad lockdep_assert_held_once(&man->lock); 272 1.1 riastrad 273 1.1 riastrad if (header->inline_space) { 274 1.1 riastrad vmw_cmdbuf_header_inline_free(header); 275 1.1 riastrad return; 276 1.1 riastrad } 277 1.1 riastrad 278 1.1 riastrad drm_mm_remove_node(&header->node); 279 1.6 riastrad DRM_SPIN_WAKEUP_ALL(&man->alloc_queue, &man->lock); /* XXX */ 280 1.1 riastrad if (header->cb_header) 281 1.1 riastrad dma_pool_free(man->headers, header->cb_header, 282 1.1 riastrad header->handle); 283 1.1 riastrad kfree(header); 284 1.1 riastrad } 285 1.1 riastrad 286 1.1 riastrad /** 287 1.1 riastrad * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its 288 1.1 riastrad * associated structures. 289 1.1 riastrad * 290 1.1 riastrad * @header: Pointer to the header to free. 291 1.1 riastrad */ 292 1.1 riastrad void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header) 293 1.1 riastrad { 294 1.1 riastrad struct vmw_cmdbuf_man *man = header->man; 295 1.1 riastrad 296 1.1 riastrad /* Avoid locking if inline_space */ 297 1.1 riastrad if (header->inline_space) { 298 1.1 riastrad vmw_cmdbuf_header_inline_free(header); 299 1.1 riastrad return; 300 1.1 riastrad } 301 1.3 riastrad spin_lock(&man->lock); 302 1.1 riastrad __vmw_cmdbuf_header_free(header); 303 1.3 riastrad spin_unlock(&man->lock); 304 1.1 riastrad } 305 1.1 riastrad 306 1.1 riastrad 307 1.1 riastrad /** 308 1.1 riastrad * vmw_cmbuf_header_submit: Submit a command buffer to hardware. 309 1.1 riastrad * 310 1.1 riastrad * @header: The header of the buffer to submit. 311 1.1 riastrad */ 312 1.1 riastrad static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header) 313 1.1 riastrad { 314 1.1 riastrad struct vmw_cmdbuf_man *man = header->man; 315 1.1 riastrad u32 val; 316 1.1 riastrad 317 1.1 riastrad val = upper_32_bits(header->handle); 318 1.1 riastrad vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val); 319 1.1 riastrad 320 1.1 riastrad val = lower_32_bits(header->handle); 321 1.1 riastrad val |= header->cb_context & SVGA_CB_CONTEXT_MASK; 322 1.1 riastrad vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val); 323 1.1 riastrad 324 1.1 riastrad return header->cb_header->status; 325 1.1 riastrad } 326 1.1 riastrad 327 1.1 riastrad /** 328 1.1 riastrad * vmw_cmdbuf_ctx_init: Initialize a command buffer context. 329 1.1 riastrad * 330 1.1 riastrad * @ctx: The command buffer context to initialize 331 1.1 riastrad */ 332 1.1 riastrad static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx) 333 1.1 riastrad { 334 1.1 riastrad INIT_LIST_HEAD(&ctx->hw_submitted); 335 1.1 riastrad INIT_LIST_HEAD(&ctx->submitted); 336 1.1 riastrad INIT_LIST_HEAD(&ctx->preempted); 337 1.1 riastrad ctx->num_hw_submitted = 0; 338 1.1 riastrad } 339 1.1 riastrad 340 1.1 riastrad /** 341 1.1 riastrad * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer 342 1.1 riastrad * context. 343 1.1 riastrad * 344 1.1 riastrad * @man: The command buffer manager. 345 1.1 riastrad * @ctx: The command buffer context. 346 1.1 riastrad * 347 1.1 riastrad * Submits command buffers to hardware until there are no more command 348 1.1 riastrad * buffers to submit or the hardware can't handle more command buffers. 349 1.1 riastrad */ 350 1.1 riastrad static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man, 351 1.1 riastrad struct vmw_cmdbuf_context *ctx) 352 1.1 riastrad { 353 1.1 riastrad while (ctx->num_hw_submitted < man->max_hw_submitted && 354 1.3 riastrad !list_empty(&ctx->submitted) && 355 1.3 riastrad !ctx->block_submission) { 356 1.1 riastrad struct vmw_cmdbuf_header *entry; 357 1.1 riastrad SVGACBStatus status; 358 1.1 riastrad 359 1.1 riastrad entry = list_first_entry(&ctx->submitted, 360 1.1 riastrad struct vmw_cmdbuf_header, 361 1.1 riastrad list); 362 1.1 riastrad 363 1.1 riastrad status = vmw_cmdbuf_header_submit(entry); 364 1.1 riastrad 365 1.1 riastrad /* This should never happen */ 366 1.1 riastrad if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) { 367 1.1 riastrad entry->cb_header->status = SVGA_CB_STATUS_NONE; 368 1.1 riastrad break; 369 1.1 riastrad } 370 1.1 riastrad 371 1.1 riastrad list_del(&entry->list); 372 1.1 riastrad list_add_tail(&entry->list, &ctx->hw_submitted); 373 1.1 riastrad ctx->num_hw_submitted++; 374 1.1 riastrad } 375 1.1 riastrad 376 1.1 riastrad } 377 1.1 riastrad 378 1.1 riastrad /** 379 1.1 riastrad * vmw_cmdbuf_ctx_submit: Process a command buffer context. 380 1.1 riastrad * 381 1.1 riastrad * @man: The command buffer manager. 382 1.1 riastrad * @ctx: The command buffer context. 383 1.1 riastrad * 384 1.1 riastrad * Submit command buffers to hardware if possible, and process finished 385 1.1 riastrad * buffers. Typically freeing them, but on preemption or error take 386 1.1 riastrad * appropriate action. Wake up waiters if appropriate. 387 1.1 riastrad */ 388 1.1 riastrad static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man, 389 1.1 riastrad struct vmw_cmdbuf_context *ctx, 390 1.1 riastrad int *notempty) 391 1.1 riastrad { 392 1.1 riastrad struct vmw_cmdbuf_header *entry, *next; 393 1.1 riastrad 394 1.6 riastrad assert_spin_locked(&man->lock); 395 1.6 riastrad 396 1.1 riastrad vmw_cmdbuf_ctx_submit(man, ctx); 397 1.1 riastrad 398 1.1 riastrad list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) { 399 1.1 riastrad SVGACBStatus status = entry->cb_header->status; 400 1.1 riastrad 401 1.1 riastrad if (status == SVGA_CB_STATUS_NONE) 402 1.1 riastrad break; 403 1.1 riastrad 404 1.1 riastrad list_del(&entry->list); 405 1.6 riastrad DRM_SPIN_WAKEUP_ONE(&man->idle_queue, &man->lock); 406 1.1 riastrad ctx->num_hw_submitted--; 407 1.1 riastrad switch (status) { 408 1.1 riastrad case SVGA_CB_STATUS_COMPLETED: 409 1.1 riastrad __vmw_cmdbuf_header_free(entry); 410 1.1 riastrad break; 411 1.1 riastrad case SVGA_CB_STATUS_COMMAND_ERROR: 412 1.3 riastrad WARN_ONCE(true, "Command buffer error.\n"); 413 1.3 riastrad entry->cb_header->status = SVGA_CB_STATUS_NONE; 414 1.1 riastrad list_add_tail(&entry->list, &man->error); 415 1.1 riastrad schedule_work(&man->work); 416 1.1 riastrad break; 417 1.1 riastrad case SVGA_CB_STATUS_PREEMPTED: 418 1.3 riastrad entry->cb_header->status = SVGA_CB_STATUS_NONE; 419 1.3 riastrad list_add_tail(&entry->list, &ctx->preempted); 420 1.3 riastrad break; 421 1.3 riastrad case SVGA_CB_STATUS_CB_HEADER_ERROR: 422 1.3 riastrad WARN_ONCE(true, "Command buffer header error.\n"); 423 1.3 riastrad __vmw_cmdbuf_header_free(entry); 424 1.1 riastrad break; 425 1.1 riastrad default: 426 1.1 riastrad WARN_ONCE(true, "Undefined command buffer status.\n"); 427 1.1 riastrad __vmw_cmdbuf_header_free(entry); 428 1.1 riastrad break; 429 1.1 riastrad } 430 1.1 riastrad } 431 1.1 riastrad 432 1.1 riastrad vmw_cmdbuf_ctx_submit(man, ctx); 433 1.1 riastrad if (!list_empty(&ctx->submitted)) 434 1.1 riastrad (*notempty)++; 435 1.1 riastrad } 436 1.1 riastrad 437 1.1 riastrad /** 438 1.1 riastrad * vmw_cmdbuf_man_process - Process all command buffer contexts and 439 1.1 riastrad * switch on and off irqs as appropriate. 440 1.1 riastrad * 441 1.1 riastrad * @man: The command buffer manager. 442 1.1 riastrad * 443 1.1 riastrad * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has 444 1.1 riastrad * command buffers left that are not submitted to hardware, Make sure 445 1.1 riastrad * IRQ handling is turned on. Otherwise, make sure it's turned off. 446 1.1 riastrad */ 447 1.1 riastrad static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man) 448 1.1 riastrad { 449 1.1 riastrad int notempty; 450 1.1 riastrad struct vmw_cmdbuf_context *ctx; 451 1.1 riastrad int i; 452 1.1 riastrad 453 1.6 riastrad assert_spin_locked(&man->lock); 454 1.6 riastrad 455 1.1 riastrad retry: 456 1.1 riastrad notempty = 0; 457 1.1 riastrad for_each_cmdbuf_ctx(man, i, ctx) 458 1.1 riastrad vmw_cmdbuf_ctx_process(man, ctx, ¬empty); 459 1.1 riastrad 460 1.1 riastrad if (man->irq_on && !notempty) { 461 1.1 riastrad vmw_generic_waiter_remove(man->dev_priv, 462 1.1 riastrad SVGA_IRQFLAG_COMMAND_BUFFER, 463 1.1 riastrad &man->dev_priv->cmdbuf_waiters); 464 1.1 riastrad man->irq_on = false; 465 1.1 riastrad } else if (!man->irq_on && notempty) { 466 1.1 riastrad vmw_generic_waiter_add(man->dev_priv, 467 1.1 riastrad SVGA_IRQFLAG_COMMAND_BUFFER, 468 1.1 riastrad &man->dev_priv->cmdbuf_waiters); 469 1.1 riastrad man->irq_on = true; 470 1.1 riastrad 471 1.1 riastrad /* Rerun in case we just missed an irq. */ 472 1.1 riastrad goto retry; 473 1.1 riastrad } 474 1.1 riastrad } 475 1.1 riastrad 476 1.1 riastrad /** 477 1.1 riastrad * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a 478 1.1 riastrad * command buffer context 479 1.1 riastrad * 480 1.1 riastrad * @man: The command buffer manager. 481 1.1 riastrad * @header: The header of the buffer to submit. 482 1.1 riastrad * @cb_context: The command buffer context to use. 483 1.1 riastrad * 484 1.1 riastrad * This function adds @header to the "submitted" queue of the command 485 1.1 riastrad * buffer context identified by @cb_context. It then calls the command buffer 486 1.1 riastrad * manager processing to potentially submit the buffer to hardware. 487 1.1 riastrad * @man->lock needs to be held when calling this function. 488 1.1 riastrad */ 489 1.1 riastrad static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man, 490 1.1 riastrad struct vmw_cmdbuf_header *header, 491 1.1 riastrad SVGACBContext cb_context) 492 1.1 riastrad { 493 1.1 riastrad if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT)) 494 1.1 riastrad header->cb_header->dxContext = 0; 495 1.1 riastrad header->cb_context = cb_context; 496 1.1 riastrad list_add_tail(&header->list, &man->ctx[cb_context].submitted); 497 1.1 riastrad 498 1.1 riastrad vmw_cmdbuf_man_process(man); 499 1.1 riastrad } 500 1.1 riastrad 501 1.1 riastrad /** 502 1.3 riastrad * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt 503 1.3 riastrad * handler implemented as a threaded irq task. 504 1.1 riastrad * 505 1.3 riastrad * @man: Pointer to the command buffer manager. 506 1.1 riastrad * 507 1.3 riastrad * The bottom half of the interrupt handler simply calls into the 508 1.1 riastrad * command buffer processor to free finished buffers and submit any 509 1.1 riastrad * queued buffers to hardware. 510 1.1 riastrad */ 511 1.3 riastrad void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man) 512 1.1 riastrad { 513 1.1 riastrad spin_lock(&man->lock); 514 1.1 riastrad vmw_cmdbuf_man_process(man); 515 1.1 riastrad spin_unlock(&man->lock); 516 1.1 riastrad } 517 1.1 riastrad 518 1.1 riastrad /** 519 1.1 riastrad * vmw_cmdbuf_work_func - The deferred work function that handles 520 1.1 riastrad * command buffer errors. 521 1.1 riastrad * 522 1.1 riastrad * @work: The work func closure argument. 523 1.1 riastrad * 524 1.1 riastrad * Restarting the command buffer context after an error requires process 525 1.1 riastrad * context, so it is deferred to this work function. 526 1.1 riastrad */ 527 1.1 riastrad static void vmw_cmdbuf_work_func(struct work_struct *work) 528 1.1 riastrad { 529 1.1 riastrad struct vmw_cmdbuf_man *man = 530 1.1 riastrad container_of(work, struct vmw_cmdbuf_man, work); 531 1.1 riastrad struct vmw_cmdbuf_header *entry, *next; 532 1.1 riastrad uint32_t dummy; 533 1.3 riastrad bool send_fence = false; 534 1.3 riastrad struct list_head restart_head[SVGA_CB_CONTEXT_MAX]; 535 1.3 riastrad int i; 536 1.3 riastrad struct vmw_cmdbuf_context *ctx; 537 1.3 riastrad bool global_block = false; 538 1.3 riastrad 539 1.3 riastrad for_each_cmdbuf_ctx(man, i, ctx) 540 1.3 riastrad INIT_LIST_HEAD(&restart_head[i]); 541 1.1 riastrad 542 1.3 riastrad mutex_lock(&man->error_mutex); 543 1.3 riastrad spin_lock(&man->lock); 544 1.1 riastrad list_for_each_entry_safe(entry, next, &man->error, list) { 545 1.3 riastrad SVGACBHeader *cb_hdr = entry->cb_header; 546 1.3 riastrad SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) 547 1.3 riastrad (entry->cmd + cb_hdr->errorOffset); 548 1.3 riastrad u32 error_cmd_size, new_start_offset; 549 1.3 riastrad const char *cmd_name; 550 1.3 riastrad 551 1.3 riastrad list_del_init(&entry->list); 552 1.3 riastrad global_block = true; 553 1.3 riastrad 554 1.3 riastrad if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) { 555 1.3 riastrad VMW_DEBUG_USER("Unknown command causing device error.\n"); 556 1.3 riastrad VMW_DEBUG_USER("Command buffer offset is %lu\n", 557 1.3 riastrad (unsigned long) cb_hdr->errorOffset); 558 1.3 riastrad __vmw_cmdbuf_header_free(entry); 559 1.3 riastrad send_fence = true; 560 1.3 riastrad continue; 561 1.3 riastrad } 562 1.3 riastrad 563 1.3 riastrad VMW_DEBUG_USER("Command \"%s\" causing device error.\n", 564 1.3 riastrad cmd_name); 565 1.3 riastrad VMW_DEBUG_USER("Command buffer offset is %lu\n", 566 1.3 riastrad (unsigned long) cb_hdr->errorOffset); 567 1.3 riastrad VMW_DEBUG_USER("Command size is %lu\n", 568 1.3 riastrad (unsigned long) error_cmd_size); 569 1.3 riastrad 570 1.3 riastrad new_start_offset = cb_hdr->errorOffset + error_cmd_size; 571 1.3 riastrad 572 1.3 riastrad if (new_start_offset >= cb_hdr->length) { 573 1.3 riastrad __vmw_cmdbuf_header_free(entry); 574 1.3 riastrad send_fence = true; 575 1.3 riastrad continue; 576 1.3 riastrad } 577 1.3 riastrad 578 1.3 riastrad if (man->using_mob) 579 1.3 riastrad cb_hdr->ptr.mob.mobOffset += new_start_offset; 580 1.3 riastrad else 581 1.3 riastrad cb_hdr->ptr.pa += (u64) new_start_offset; 582 1.1 riastrad 583 1.3 riastrad entry->cmd += new_start_offset; 584 1.3 riastrad cb_hdr->length -= new_start_offset; 585 1.3 riastrad cb_hdr->errorOffset = 0; 586 1.3 riastrad cb_hdr->offset = 0; 587 1.3 riastrad 588 1.3 riastrad list_add_tail(&entry->list, &restart_head[entry->cb_context]); 589 1.1 riastrad } 590 1.1 riastrad 591 1.3 riastrad for_each_cmdbuf_ctx(man, i, ctx) 592 1.3 riastrad man->ctx[i].block_submission = true; 593 1.3 riastrad 594 1.3 riastrad spin_unlock(&man->lock); 595 1.3 riastrad 596 1.3 riastrad /* Preempt all contexts */ 597 1.3 riastrad if (global_block && vmw_cmdbuf_preempt(man, 0)) 598 1.3 riastrad DRM_ERROR("Failed preempting command buffer contexts\n"); 599 1.3 riastrad 600 1.3 riastrad spin_lock(&man->lock); 601 1.3 riastrad for_each_cmdbuf_ctx(man, i, ctx) { 602 1.3 riastrad /* Move preempted command buffers to the preempted queue. */ 603 1.3 riastrad vmw_cmdbuf_ctx_process(man, ctx, &dummy); 604 1.3 riastrad 605 1.3 riastrad /* 606 1.3 riastrad * Add the preempted queue after the command buffer 607 1.3 riastrad * that caused an error. 608 1.3 riastrad */ 609 1.3 riastrad list_splice_init(&ctx->preempted, restart_head[i].prev); 610 1.3 riastrad 611 1.3 riastrad /* 612 1.3 riastrad * Finally add all command buffers first in the submitted 613 1.3 riastrad * queue, to rerun them. 614 1.3 riastrad */ 615 1.3 riastrad 616 1.3 riastrad ctx->block_submission = false; 617 1.3 riastrad list_splice_init(&restart_head[i], &ctx->submitted); 618 1.3 riastrad } 619 1.3 riastrad 620 1.3 riastrad vmw_cmdbuf_man_process(man); 621 1.3 riastrad spin_unlock(&man->lock); 622 1.3 riastrad 623 1.3 riastrad if (global_block && vmw_cmdbuf_startstop(man, 0, true)) 624 1.3 riastrad DRM_ERROR("Failed restarting command buffer contexts\n"); 625 1.1 riastrad 626 1.1 riastrad /* Send a new fence in case one was removed */ 627 1.3 riastrad if (send_fence) { 628 1.3 riastrad vmw_fifo_send_fence(man->dev_priv, &dummy); 629 1.6 riastrad spin_lock(&man->lock); 630 1.6 riastrad DRM_SPIN_WAKEUP_ALL(&man->idle_queue, &man->lock); 631 1.6 riastrad spin_unlock(&man->lock); 632 1.3 riastrad } 633 1.3 riastrad 634 1.3 riastrad mutex_unlock(&man->error_mutex); 635 1.1 riastrad } 636 1.1 riastrad 637 1.1 riastrad /** 638 1.1 riastrad * vmw_cmdbuf_man idle - Check whether the command buffer manager is idle. 639 1.1 riastrad * 640 1.1 riastrad * @man: The command buffer manager. 641 1.1 riastrad * @check_preempted: Check also the preempted queue for pending command buffers. 642 1.1 riastrad * 643 1.1 riastrad */ 644 1.1 riastrad static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man, 645 1.1 riastrad bool check_preempted) 646 1.1 riastrad { 647 1.1 riastrad struct vmw_cmdbuf_context *ctx; 648 1.1 riastrad bool idle = false; 649 1.1 riastrad int i; 650 1.1 riastrad 651 1.6 riastrad assert_spin_locked(&man->lock); 652 1.6 riastrad 653 1.1 riastrad vmw_cmdbuf_man_process(man); 654 1.1 riastrad for_each_cmdbuf_ctx(man, i, ctx) { 655 1.1 riastrad if (!list_empty(&ctx->submitted) || 656 1.1 riastrad !list_empty(&ctx->hw_submitted) || 657 1.1 riastrad (check_preempted && !list_empty(&ctx->preempted))) 658 1.6 riastrad goto out; 659 1.1 riastrad } 660 1.1 riastrad 661 1.1 riastrad idle = list_empty(&man->error); 662 1.1 riastrad 663 1.6 riastrad out: 664 1.1 riastrad return idle; 665 1.1 riastrad } 666 1.1 riastrad 667 1.1 riastrad /** 668 1.1 riastrad * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel 669 1.1 riastrad * command submissions 670 1.1 riastrad * 671 1.1 riastrad * @man: The command buffer manager. 672 1.1 riastrad * 673 1.1 riastrad * Flushes the current command buffer without allocating a new one. A new one 674 1.1 riastrad * is automatically allocated when needed. Call with @man->cur_mutex held. 675 1.1 riastrad */ 676 1.1 riastrad static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man) 677 1.1 riastrad { 678 1.1 riastrad struct vmw_cmdbuf_header *cur = man->cur; 679 1.1 riastrad 680 1.3 riastrad lockdep_assert_held_once(&man->cur_mutex); 681 1.1 riastrad 682 1.1 riastrad if (!cur) 683 1.1 riastrad return; 684 1.1 riastrad 685 1.3 riastrad spin_lock(&man->lock); 686 1.1 riastrad if (man->cur_pos == 0) { 687 1.1 riastrad __vmw_cmdbuf_header_free(cur); 688 1.1 riastrad goto out_unlock; 689 1.1 riastrad } 690 1.1 riastrad 691 1.1 riastrad man->cur->cb_header->length = man->cur_pos; 692 1.1 riastrad vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0); 693 1.1 riastrad out_unlock: 694 1.3 riastrad spin_unlock(&man->lock); 695 1.1 riastrad man->cur = NULL; 696 1.1 riastrad man->cur_pos = 0; 697 1.1 riastrad } 698 1.1 riastrad 699 1.1 riastrad /** 700 1.1 riastrad * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel 701 1.1 riastrad * command submissions 702 1.1 riastrad * 703 1.1 riastrad * @man: The command buffer manager. 704 1.1 riastrad * @interruptible: Whether to sleep interruptible when sleeping. 705 1.1 riastrad * 706 1.1 riastrad * Flushes the current command buffer without allocating a new one. A new one 707 1.1 riastrad * is automatically allocated when needed. 708 1.1 riastrad */ 709 1.1 riastrad int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man, 710 1.1 riastrad bool interruptible) 711 1.1 riastrad { 712 1.1 riastrad int ret = vmw_cmdbuf_cur_lock(man, interruptible); 713 1.1 riastrad 714 1.1 riastrad if (ret) 715 1.1 riastrad return ret; 716 1.1 riastrad 717 1.1 riastrad __vmw_cmdbuf_cur_flush(man); 718 1.1 riastrad vmw_cmdbuf_cur_unlock(man); 719 1.1 riastrad 720 1.1 riastrad return 0; 721 1.1 riastrad } 722 1.1 riastrad 723 1.1 riastrad /** 724 1.1 riastrad * vmw_cmdbuf_idle - Wait for command buffer manager idle. 725 1.1 riastrad * 726 1.1 riastrad * @man: The command buffer manager. 727 1.1 riastrad * @interruptible: Sleep interruptible while waiting. 728 1.1 riastrad * @timeout: Time out after this many ticks. 729 1.1 riastrad * 730 1.1 riastrad * Wait until the command buffer manager has processed all command buffers, 731 1.1 riastrad * or until a timeout occurs. If a timeout occurs, the function will return 732 1.1 riastrad * -EBUSY. 733 1.1 riastrad */ 734 1.1 riastrad int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible, 735 1.1 riastrad unsigned long timeout) 736 1.1 riastrad { 737 1.1 riastrad int ret; 738 1.1 riastrad 739 1.1 riastrad ret = vmw_cmdbuf_cur_flush(man, interruptible); 740 1.6 riastrad spin_lock(&man->lock); 741 1.1 riastrad vmw_generic_waiter_add(man->dev_priv, 742 1.1 riastrad SVGA_IRQFLAG_COMMAND_BUFFER, 743 1.1 riastrad &man->dev_priv->cmdbuf_waiters); 744 1.1 riastrad if (interruptible) { 745 1.6 riastrad DRM_SPIN_TIMED_WAIT_UNTIL(ret, &man->idle_queue, &man->lock, 746 1.6 riastrad timeout, vmw_cmdbuf_man_idle(man, true)); 747 1.1 riastrad } else { 748 1.6 riastrad DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret, &man->idle_queue, 749 1.6 riastrad &man->lock, 750 1.6 riastrad timeout, vmw_cmdbuf_man_idle(man, true)); 751 1.1 riastrad } 752 1.1 riastrad vmw_generic_waiter_remove(man->dev_priv, 753 1.1 riastrad SVGA_IRQFLAG_COMMAND_BUFFER, 754 1.1 riastrad &man->dev_priv->cmdbuf_waiters); 755 1.1 riastrad if (ret == 0) { 756 1.1 riastrad if (!vmw_cmdbuf_man_idle(man, true)) 757 1.1 riastrad ret = -EBUSY; 758 1.1 riastrad else 759 1.1 riastrad ret = 0; 760 1.1 riastrad } 761 1.6 riastrad spin_unlock(&man->lock); 762 1.1 riastrad if (ret > 0) 763 1.1 riastrad ret = 0; 764 1.1 riastrad 765 1.1 riastrad return ret; 766 1.1 riastrad } 767 1.1 riastrad 768 1.1 riastrad /** 769 1.1 riastrad * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool. 770 1.1 riastrad * 771 1.1 riastrad * @man: The command buffer manager. 772 1.1 riastrad * @info: Allocation info. Will hold the size on entry and allocated mm node 773 1.1 riastrad * on successful return. 774 1.1 riastrad * 775 1.1 riastrad * Try to allocate buffer space from the main pool. Returns true if succeeded. 776 1.1 riastrad * If a fatal error was hit, the error code is returned in @info->ret. 777 1.1 riastrad */ 778 1.1 riastrad static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man, 779 1.1 riastrad struct vmw_cmdbuf_alloc_info *info) 780 1.1 riastrad { 781 1.1 riastrad int ret; 782 1.1 riastrad 783 1.1 riastrad if (info->done) 784 1.1 riastrad return true; 785 1.3 riastrad 786 1.1 riastrad memset(info->node, 0, sizeof(*info->node)); 787 1.3 riastrad spin_lock(&man->lock); 788 1.3 riastrad ret = drm_mm_insert_node(&man->mm, info->node, info->page_size); 789 1.1 riastrad if (ret) { 790 1.1 riastrad vmw_cmdbuf_man_process(man); 791 1.3 riastrad ret = drm_mm_insert_node(&man->mm, info->node, info->page_size); 792 1.1 riastrad } 793 1.1 riastrad 794 1.3 riastrad spin_unlock(&man->lock); 795 1.1 riastrad info->done = !ret; 796 1.1 riastrad 797 1.1 riastrad return info->done; 798 1.1 riastrad } 799 1.1 riastrad 800 1.1 riastrad /** 801 1.1 riastrad * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool. 802 1.1 riastrad * 803 1.1 riastrad * @man: The command buffer manager. 804 1.1 riastrad * @node: Pointer to pre-allocated range-manager node. 805 1.1 riastrad * @size: The size of the allocation. 806 1.1 riastrad * @interruptible: Whether to sleep interruptible while waiting for space. 807 1.1 riastrad * 808 1.1 riastrad * This function allocates buffer space from the main pool, and if there is 809 1.1 riastrad * no space available ATM, it turns on IRQ handling and sleeps waiting for it to 810 1.1 riastrad * become available. 811 1.1 riastrad */ 812 1.1 riastrad static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man, 813 1.1 riastrad struct drm_mm_node *node, 814 1.1 riastrad size_t size, 815 1.1 riastrad bool interruptible) 816 1.1 riastrad { 817 1.1 riastrad struct vmw_cmdbuf_alloc_info info; 818 1.1 riastrad 819 1.1 riastrad info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT; 820 1.1 riastrad info.node = node; 821 1.1 riastrad info.done = false; 822 1.1 riastrad 823 1.1 riastrad /* 824 1.1 riastrad * To prevent starvation of large requests, only one allocating call 825 1.1 riastrad * at a time waiting for space. 826 1.1 riastrad */ 827 1.1 riastrad if (interruptible) { 828 1.1 riastrad if (mutex_lock_interruptible(&man->space_mutex)) 829 1.1 riastrad return -ERESTARTSYS; 830 1.1 riastrad } else { 831 1.1 riastrad mutex_lock(&man->space_mutex); 832 1.1 riastrad } 833 1.6 riastrad spin_lock(&man->lock); 834 1.1 riastrad 835 1.1 riastrad /* Try to allocate space without waiting. */ 836 1.1 riastrad if (vmw_cmdbuf_try_alloc(man, &info)) 837 1.1 riastrad goto out_unlock; 838 1.1 riastrad 839 1.1 riastrad vmw_generic_waiter_add(man->dev_priv, 840 1.1 riastrad SVGA_IRQFLAG_COMMAND_BUFFER, 841 1.1 riastrad &man->dev_priv->cmdbuf_waiters); 842 1.1 riastrad 843 1.1 riastrad if (interruptible) { 844 1.1 riastrad int ret; 845 1.1 riastrad 846 1.6 riastrad DRM_SPIN_WAIT_UNTIL(ret, &man->alloc_queue, &man->lock, 847 1.6 riastrad vmw_cmdbuf_try_alloc(man, &info)); 848 1.1 riastrad if (ret) { 849 1.1 riastrad vmw_generic_waiter_remove 850 1.1 riastrad (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER, 851 1.1 riastrad &man->dev_priv->cmdbuf_waiters); 852 1.6 riastrad spin_unlock(&man->lock); 853 1.1 riastrad mutex_unlock(&man->space_mutex); 854 1.1 riastrad return ret; 855 1.1 riastrad } 856 1.1 riastrad } else { 857 1.6 riastrad int ret; 858 1.6 riastrad 859 1.6 riastrad DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &man->alloc_queue, &man->lock, 860 1.6 riastrad vmw_cmdbuf_try_alloc(man, &info)); 861 1.6 riastrad BUG_ON(ret); 862 1.1 riastrad } 863 1.1 riastrad vmw_generic_waiter_remove(man->dev_priv, 864 1.1 riastrad SVGA_IRQFLAG_COMMAND_BUFFER, 865 1.1 riastrad &man->dev_priv->cmdbuf_waiters); 866 1.1 riastrad 867 1.1 riastrad out_unlock: 868 1.6 riastrad spin_unlock(&man->lock); 869 1.1 riastrad mutex_unlock(&man->space_mutex); 870 1.1 riastrad 871 1.1 riastrad return 0; 872 1.1 riastrad } 873 1.1 riastrad 874 1.1 riastrad /** 875 1.1 riastrad * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer 876 1.1 riastrad * space from the main pool. 877 1.1 riastrad * 878 1.1 riastrad * @man: The command buffer manager. 879 1.1 riastrad * @header: Pointer to the header to set up. 880 1.1 riastrad * @size: The requested size of the buffer space. 881 1.1 riastrad * @interruptible: Whether to sleep interruptible while waiting for space. 882 1.1 riastrad */ 883 1.1 riastrad static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man, 884 1.1 riastrad struct vmw_cmdbuf_header *header, 885 1.1 riastrad size_t size, 886 1.1 riastrad bool interruptible) 887 1.1 riastrad { 888 1.1 riastrad SVGACBHeader *cb_hdr; 889 1.1 riastrad size_t offset; 890 1.1 riastrad int ret; 891 1.1 riastrad 892 1.1 riastrad if (!man->has_pool) 893 1.1 riastrad return -ENOMEM; 894 1.1 riastrad 895 1.1 riastrad ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible); 896 1.1 riastrad 897 1.1 riastrad if (ret) 898 1.1 riastrad return ret; 899 1.1 riastrad 900 1.3 riastrad header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL, 901 1.3 riastrad &header->handle); 902 1.1 riastrad if (!header->cb_header) { 903 1.1 riastrad ret = -ENOMEM; 904 1.1 riastrad goto out_no_cb_header; 905 1.1 riastrad } 906 1.1 riastrad 907 1.1 riastrad header->size = header->node.size << PAGE_SHIFT; 908 1.1 riastrad cb_hdr = header->cb_header; 909 1.1 riastrad offset = header->node.start << PAGE_SHIFT; 910 1.1 riastrad header->cmd = man->map + offset; 911 1.1 riastrad if (man->using_mob) { 912 1.1 riastrad cb_hdr->flags = SVGA_CB_FLAG_MOB; 913 1.1 riastrad cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start; 914 1.1 riastrad cb_hdr->ptr.mob.mobOffset = offset; 915 1.1 riastrad } else { 916 1.1 riastrad cb_hdr->ptr.pa = (u64)man->handle + (u64)offset; 917 1.1 riastrad } 918 1.1 riastrad 919 1.1 riastrad return 0; 920 1.1 riastrad 921 1.1 riastrad out_no_cb_header: 922 1.3 riastrad spin_lock(&man->lock); 923 1.1 riastrad drm_mm_remove_node(&header->node); 924 1.3 riastrad spin_unlock(&man->lock); 925 1.1 riastrad 926 1.1 riastrad return ret; 927 1.1 riastrad } 928 1.1 riastrad 929 1.1 riastrad /** 930 1.1 riastrad * vmw_cmdbuf_space_inline - Set up a command buffer header with 931 1.1 riastrad * inline command buffer space. 932 1.1 riastrad * 933 1.1 riastrad * @man: The command buffer manager. 934 1.1 riastrad * @header: Pointer to the header to set up. 935 1.1 riastrad * @size: The requested size of the buffer space. 936 1.1 riastrad */ 937 1.1 riastrad static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man, 938 1.1 riastrad struct vmw_cmdbuf_header *header, 939 1.1 riastrad int size) 940 1.1 riastrad { 941 1.1 riastrad struct vmw_cmdbuf_dheader *dheader; 942 1.1 riastrad SVGACBHeader *cb_hdr; 943 1.1 riastrad 944 1.1 riastrad if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE)) 945 1.1 riastrad return -ENOMEM; 946 1.1 riastrad 947 1.3 riastrad dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL, 948 1.3 riastrad &header->handle); 949 1.1 riastrad if (!dheader) 950 1.1 riastrad return -ENOMEM; 951 1.1 riastrad 952 1.1 riastrad header->inline_space = true; 953 1.1 riastrad header->size = VMW_CMDBUF_INLINE_SIZE; 954 1.1 riastrad cb_hdr = &dheader->cb_header; 955 1.1 riastrad header->cb_header = cb_hdr; 956 1.1 riastrad header->cmd = dheader->cmd; 957 1.1 riastrad cb_hdr->status = SVGA_CB_STATUS_NONE; 958 1.1 riastrad cb_hdr->flags = SVGA_CB_FLAG_NONE; 959 1.1 riastrad cb_hdr->ptr.pa = (u64)header->handle + 960 1.1 riastrad (u64)offsetof(struct vmw_cmdbuf_dheader, cmd); 961 1.1 riastrad 962 1.1 riastrad return 0; 963 1.1 riastrad } 964 1.1 riastrad 965 1.1 riastrad /** 966 1.1 riastrad * vmw_cmdbuf_alloc - Allocate a command buffer header complete with 967 1.1 riastrad * command buffer space. 968 1.1 riastrad * 969 1.1 riastrad * @man: The command buffer manager. 970 1.1 riastrad * @size: The requested size of the buffer space. 971 1.1 riastrad * @interruptible: Whether to sleep interruptible while waiting for space. 972 1.1 riastrad * @p_header: points to a header pointer to populate on successful return. 973 1.1 riastrad * 974 1.1 riastrad * Returns a pointer to command buffer space if successful. Otherwise 975 1.1 riastrad * returns an error pointer. The header pointer returned in @p_header should 976 1.1 riastrad * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit(). 977 1.1 riastrad */ 978 1.1 riastrad void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man, 979 1.1 riastrad size_t size, bool interruptible, 980 1.1 riastrad struct vmw_cmdbuf_header **p_header) 981 1.1 riastrad { 982 1.1 riastrad struct vmw_cmdbuf_header *header; 983 1.1 riastrad int ret = 0; 984 1.1 riastrad 985 1.1 riastrad *p_header = NULL; 986 1.1 riastrad 987 1.1 riastrad header = kzalloc(sizeof(*header), GFP_KERNEL); 988 1.1 riastrad if (!header) 989 1.1 riastrad return ERR_PTR(-ENOMEM); 990 1.1 riastrad 991 1.1 riastrad if (size <= VMW_CMDBUF_INLINE_SIZE) 992 1.1 riastrad ret = vmw_cmdbuf_space_inline(man, header, size); 993 1.1 riastrad else 994 1.1 riastrad ret = vmw_cmdbuf_space_pool(man, header, size, interruptible); 995 1.1 riastrad 996 1.1 riastrad if (ret) { 997 1.1 riastrad kfree(header); 998 1.1 riastrad return ERR_PTR(ret); 999 1.1 riastrad } 1000 1.1 riastrad 1001 1.1 riastrad header->man = man; 1002 1.1 riastrad INIT_LIST_HEAD(&header->list); 1003 1.1 riastrad header->cb_header->status = SVGA_CB_STATUS_NONE; 1004 1.1 riastrad *p_header = header; 1005 1.1 riastrad 1006 1.1 riastrad return header->cmd; 1007 1.1 riastrad } 1008 1.1 riastrad 1009 1.1 riastrad /** 1010 1.1 riastrad * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current 1011 1.1 riastrad * command buffer. 1012 1.1 riastrad * 1013 1.1 riastrad * @man: The command buffer manager. 1014 1.1 riastrad * @size: The requested size of the commands. 1015 1.1 riastrad * @ctx_id: The context id if any. Otherwise set to SVGA3D_REG_INVALID. 1016 1.1 riastrad * @interruptible: Whether to sleep interruptible while waiting for space. 1017 1.1 riastrad * 1018 1.1 riastrad * Returns a pointer to command buffer space if successful. Otherwise 1019 1.1 riastrad * returns an error pointer. 1020 1.1 riastrad */ 1021 1.1 riastrad static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man, 1022 1.1 riastrad size_t size, 1023 1.1 riastrad int ctx_id, 1024 1.1 riastrad bool interruptible) 1025 1.1 riastrad { 1026 1.1 riastrad struct vmw_cmdbuf_header *cur; 1027 1.1 riastrad void *ret; 1028 1.1 riastrad 1029 1.1 riastrad if (vmw_cmdbuf_cur_lock(man, interruptible)) 1030 1.1 riastrad return ERR_PTR(-ERESTARTSYS); 1031 1.1 riastrad 1032 1.1 riastrad cur = man->cur; 1033 1.1 riastrad if (cur && (size + man->cur_pos > cur->size || 1034 1.1 riastrad ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) && 1035 1.1 riastrad ctx_id != cur->cb_header->dxContext))) 1036 1.1 riastrad __vmw_cmdbuf_cur_flush(man); 1037 1.1 riastrad 1038 1.1 riastrad if (!man->cur) { 1039 1.1 riastrad ret = vmw_cmdbuf_alloc(man, 1040 1.1 riastrad max_t(size_t, size, man->default_size), 1041 1.1 riastrad interruptible, &man->cur); 1042 1.1 riastrad if (IS_ERR(ret)) { 1043 1.1 riastrad vmw_cmdbuf_cur_unlock(man); 1044 1.1 riastrad return ret; 1045 1.1 riastrad } 1046 1.1 riastrad 1047 1.1 riastrad cur = man->cur; 1048 1.1 riastrad } 1049 1.1 riastrad 1050 1.1 riastrad if (ctx_id != SVGA3D_INVALID_ID) { 1051 1.1 riastrad cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT; 1052 1.1 riastrad cur->cb_header->dxContext = ctx_id; 1053 1.1 riastrad } 1054 1.1 riastrad 1055 1.1 riastrad cur->reserved = size; 1056 1.1 riastrad 1057 1.1 riastrad return (void *) (man->cur->cmd + man->cur_pos); 1058 1.1 riastrad } 1059 1.1 riastrad 1060 1.1 riastrad /** 1061 1.1 riastrad * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer. 1062 1.1 riastrad * 1063 1.1 riastrad * @man: The command buffer manager. 1064 1.1 riastrad * @size: The size of the commands actually written. 1065 1.1 riastrad * @flush: Whether to flush the command buffer immediately. 1066 1.1 riastrad */ 1067 1.1 riastrad static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man, 1068 1.1 riastrad size_t size, bool flush) 1069 1.1 riastrad { 1070 1.1 riastrad struct vmw_cmdbuf_header *cur = man->cur; 1071 1.1 riastrad 1072 1.3 riastrad lockdep_assert_held_once(&man->cur_mutex); 1073 1.1 riastrad 1074 1.1 riastrad WARN_ON(size > cur->reserved); 1075 1.1 riastrad man->cur_pos += size; 1076 1.1 riastrad if (!size) 1077 1.1 riastrad cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT; 1078 1.1 riastrad if (flush) 1079 1.1 riastrad __vmw_cmdbuf_cur_flush(man); 1080 1.1 riastrad vmw_cmdbuf_cur_unlock(man); 1081 1.1 riastrad } 1082 1.1 riastrad 1083 1.1 riastrad /** 1084 1.1 riastrad * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer. 1085 1.1 riastrad * 1086 1.1 riastrad * @man: The command buffer manager. 1087 1.1 riastrad * @size: The requested size of the commands. 1088 1.1 riastrad * @ctx_id: The context id if any. Otherwise set to SVGA3D_REG_INVALID. 1089 1.1 riastrad * @interruptible: Whether to sleep interruptible while waiting for space. 1090 1.1 riastrad * @header: Header of the command buffer. NULL if the current command buffer 1091 1.1 riastrad * should be used. 1092 1.1 riastrad * 1093 1.1 riastrad * Returns a pointer to command buffer space if successful. Otherwise 1094 1.1 riastrad * returns an error pointer. 1095 1.1 riastrad */ 1096 1.1 riastrad void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size, 1097 1.1 riastrad int ctx_id, bool interruptible, 1098 1.1 riastrad struct vmw_cmdbuf_header *header) 1099 1.1 riastrad { 1100 1.1 riastrad if (!header) 1101 1.1 riastrad return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible); 1102 1.1 riastrad 1103 1.1 riastrad if (size > header->size) 1104 1.1 riastrad return ERR_PTR(-EINVAL); 1105 1.1 riastrad 1106 1.1 riastrad if (ctx_id != SVGA3D_INVALID_ID) { 1107 1.1 riastrad header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT; 1108 1.1 riastrad header->cb_header->dxContext = ctx_id; 1109 1.1 riastrad } 1110 1.1 riastrad 1111 1.1 riastrad header->reserved = size; 1112 1.1 riastrad return header->cmd; 1113 1.1 riastrad } 1114 1.1 riastrad 1115 1.1 riastrad /** 1116 1.1 riastrad * vmw_cmdbuf_commit - Commit commands in a command buffer. 1117 1.1 riastrad * 1118 1.1 riastrad * @man: The command buffer manager. 1119 1.1 riastrad * @size: The size of the commands actually written. 1120 1.1 riastrad * @header: Header of the command buffer. NULL if the current command buffer 1121 1.1 riastrad * should be used. 1122 1.1 riastrad * @flush: Whether to flush the command buffer immediately. 1123 1.1 riastrad */ 1124 1.1 riastrad void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size, 1125 1.1 riastrad struct vmw_cmdbuf_header *header, bool flush) 1126 1.1 riastrad { 1127 1.1 riastrad if (!header) { 1128 1.1 riastrad vmw_cmdbuf_commit_cur(man, size, flush); 1129 1.1 riastrad return; 1130 1.1 riastrad } 1131 1.1 riastrad 1132 1.1 riastrad (void) vmw_cmdbuf_cur_lock(man, false); 1133 1.1 riastrad __vmw_cmdbuf_cur_flush(man); 1134 1.1 riastrad WARN_ON(size > header->reserved); 1135 1.1 riastrad man->cur = header; 1136 1.1 riastrad man->cur_pos = size; 1137 1.1 riastrad if (!size) 1138 1.1 riastrad header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT; 1139 1.1 riastrad if (flush) 1140 1.1 riastrad __vmw_cmdbuf_cur_flush(man); 1141 1.1 riastrad vmw_cmdbuf_cur_unlock(man); 1142 1.1 riastrad } 1143 1.1 riastrad 1144 1.1 riastrad 1145 1.1 riastrad /** 1146 1.1 riastrad * vmw_cmdbuf_send_device_command - Send a command through the device context. 1147 1.1 riastrad * 1148 1.1 riastrad * @man: The command buffer manager. 1149 1.1 riastrad * @command: Pointer to the command to send. 1150 1.1 riastrad * @size: Size of the command. 1151 1.1 riastrad * 1152 1.1 riastrad * Synchronously sends a device context command. 1153 1.1 riastrad */ 1154 1.1 riastrad static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man, 1155 1.1 riastrad const void *command, 1156 1.1 riastrad size_t size) 1157 1.1 riastrad { 1158 1.1 riastrad struct vmw_cmdbuf_header *header; 1159 1.1 riastrad int status; 1160 1.1 riastrad void *cmd = vmw_cmdbuf_alloc(man, size, false, &header); 1161 1.1 riastrad 1162 1.1 riastrad if (IS_ERR(cmd)) 1163 1.1 riastrad return PTR_ERR(cmd); 1164 1.1 riastrad 1165 1.1 riastrad memcpy(cmd, command, size); 1166 1.1 riastrad header->cb_header->length = size; 1167 1.1 riastrad header->cb_context = SVGA_CB_CONTEXT_DEVICE; 1168 1.3 riastrad spin_lock(&man->lock); 1169 1.1 riastrad status = vmw_cmdbuf_header_submit(header); 1170 1.3 riastrad spin_unlock(&man->lock); 1171 1.1 riastrad vmw_cmdbuf_header_free(header); 1172 1.1 riastrad 1173 1.1 riastrad if (status != SVGA_CB_STATUS_COMPLETED) { 1174 1.1 riastrad DRM_ERROR("Device context command failed with status %d\n", 1175 1.1 riastrad status); 1176 1.1 riastrad return -EINVAL; 1177 1.1 riastrad } 1178 1.1 riastrad 1179 1.1 riastrad return 0; 1180 1.1 riastrad } 1181 1.1 riastrad 1182 1.1 riastrad /** 1183 1.3 riastrad * vmw_cmdbuf_preempt - Send a preempt command through the device 1184 1.3 riastrad * context. 1185 1.3 riastrad * 1186 1.3 riastrad * @man: The command buffer manager. 1187 1.3 riastrad * 1188 1.3 riastrad * Synchronously sends a preempt command. 1189 1.3 riastrad */ 1190 1.3 riastrad static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context) 1191 1.3 riastrad { 1192 1.3 riastrad struct { 1193 1.3 riastrad uint32 id; 1194 1.3 riastrad SVGADCCmdPreempt body; 1195 1.3 riastrad } __packed cmd; 1196 1.3 riastrad 1197 1.3 riastrad cmd.id = SVGA_DC_CMD_PREEMPT; 1198 1.3 riastrad cmd.body.context = SVGA_CB_CONTEXT_0 + context; 1199 1.3 riastrad cmd.body.ignoreIDZero = 0; 1200 1.3 riastrad 1201 1.3 riastrad return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd)); 1202 1.3 riastrad } 1203 1.3 riastrad 1204 1.3 riastrad 1205 1.3 riastrad /** 1206 1.1 riastrad * vmw_cmdbuf_startstop - Send a start / stop command through the device 1207 1.1 riastrad * context. 1208 1.1 riastrad * 1209 1.1 riastrad * @man: The command buffer manager. 1210 1.1 riastrad * @enable: Whether to enable or disable the context. 1211 1.1 riastrad * 1212 1.1 riastrad * Synchronously sends a device start / stop context command. 1213 1.1 riastrad */ 1214 1.3 riastrad static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context, 1215 1.1 riastrad bool enable) 1216 1.1 riastrad { 1217 1.1 riastrad struct { 1218 1.1 riastrad uint32 id; 1219 1.1 riastrad SVGADCCmdStartStop body; 1220 1.1 riastrad } __packed cmd; 1221 1.1 riastrad 1222 1.1 riastrad cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT; 1223 1.1 riastrad cmd.body.enable = (enable) ? 1 : 0; 1224 1.3 riastrad cmd.body.context = SVGA_CB_CONTEXT_0 + context; 1225 1.1 riastrad 1226 1.1 riastrad return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd)); 1227 1.1 riastrad } 1228 1.1 riastrad 1229 1.1 riastrad /** 1230 1.1 riastrad * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes 1231 1.1 riastrad * 1232 1.1 riastrad * @man: The command buffer manager. 1233 1.1 riastrad * @size: The size of the main space pool. 1234 1.1 riastrad * @default_size: The default size of the command buffer for small kernel 1235 1.1 riastrad * submissions. 1236 1.1 riastrad * 1237 1.1 riastrad * Set the size and allocate the main command buffer space pool, 1238 1.1 riastrad * as well as the default size of the command buffer for 1239 1.1 riastrad * small kernel submissions. If successful, this enables large command 1240 1.1 riastrad * submissions. Note that this function requires that rudimentary command 1241 1.1 riastrad * submission is already available and that the MOB memory manager is alive. 1242 1.1 riastrad * Returns 0 on success. Negative error code on failure. 1243 1.1 riastrad */ 1244 1.1 riastrad int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, 1245 1.1 riastrad size_t size, size_t default_size) 1246 1.1 riastrad { 1247 1.1 riastrad struct vmw_private *dev_priv = man->dev_priv; 1248 1.1 riastrad bool dummy; 1249 1.1 riastrad int ret; 1250 1.1 riastrad 1251 1.1 riastrad if (man->has_pool) 1252 1.1 riastrad return -EINVAL; 1253 1.1 riastrad 1254 1.1 riastrad /* First, try to allocate a huge chunk of DMA memory */ 1255 1.1 riastrad size = PAGE_ALIGN(size); 1256 1.5 riastrad #ifdef __NetBSD__ 1257 1.5 riastrad int error, nseg, alloced = 0, mapped = 0, loaded = 0; 1258 1.5 riastrad 1259 1.5 riastrad do { 1260 1.5 riastrad error = bus_dmamap_create(dev_priv->dev->dmat, size, 1, size, 1261 1.5 riastrad 0, BUS_DMA_ALLOCNOW|BUS_DMA_WAITOK, &man->dmamap); 1262 1.5 riastrad if (error) 1263 1.5 riastrad break; 1264 1.5 riastrad error = bus_dmamem_alloc(dev_priv->dev->dmat, size, 1, 0, 1265 1.5 riastrad &man->dmaseg, 1, &nseg, BUS_DMA_WAITOK); 1266 1.5 riastrad if (error) 1267 1.5 riastrad break; 1268 1.5 riastrad KASSERT(nseg == 1); 1269 1.5 riastrad alloced = 1; 1270 1.5 riastrad error = bus_dmamem_map(dev_priv->dev->dmat, &man->dmaseg, 1, 1271 1.5 riastrad size, (void *)&man->map, BUS_DMA_COHERENT|BUS_DMA_WAITOK); 1272 1.5 riastrad if (error) 1273 1.5 riastrad break; 1274 1.5 riastrad mapped = 1; 1275 1.5 riastrad error = bus_dmamap_load(dev_priv->dev->dmat, man->dmamap, 1276 1.5 riastrad man->map, size, NULL, BUS_DMA_WAITOK); 1277 1.5 riastrad if (error) 1278 1.5 riastrad break; 1279 1.5 riastrad loaded = 1; 1280 1.5 riastrad } while (0); 1281 1.5 riastrad if (error) { 1282 1.5 riastrad if (loaded) 1283 1.5 riastrad bus_dmamap_unload(dev_priv->dev->dmat, man->dmamap); 1284 1.5 riastrad if (mapped) 1285 1.5 riastrad bus_dmamem_unmap(dev_priv->dev->dmat, man->map, size); 1286 1.5 riastrad if (alloced) 1287 1.5 riastrad bus_dmamem_free(dev_priv->dev->dmat, &man->dmaseg, 1); 1288 1.5 riastrad if (man->dmamap) 1289 1.5 riastrad bus_dmamap_destroy(dev_priv->dev->dmat, man->dmamap); 1290 1.5 riastrad man->map = NULL; 1291 1.5 riastrad } 1292 1.5 riastrad #else 1293 1.1 riastrad man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size, 1294 1.1 riastrad &man->handle, GFP_KERNEL); 1295 1.5 riastrad #endif 1296 1.1 riastrad if (man->map) { 1297 1.1 riastrad man->using_mob = false; 1298 1.1 riastrad } else { 1299 1.1 riastrad /* 1300 1.1 riastrad * DMA memory failed. If we can have command buffers in a 1301 1.1 riastrad * MOB, try to use that instead. Note that this will 1302 1.1 riastrad * actually call into the already enabled manager, when 1303 1.1 riastrad * binding the MOB. 1304 1.1 riastrad */ 1305 1.1 riastrad if (!(dev_priv->capabilities & SVGA_CAP_DX)) 1306 1.1 riastrad return -ENOMEM; 1307 1.1 riastrad 1308 1.1 riastrad ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device, 1309 1.3 riastrad &vmw_mob_ne_placement, 0, false, 1310 1.1 riastrad &man->cmd_space); 1311 1.1 riastrad if (ret) 1312 1.1 riastrad return ret; 1313 1.1 riastrad 1314 1.1 riastrad man->using_mob = true; 1315 1.1 riastrad ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT, 1316 1.1 riastrad &man->map_obj); 1317 1.1 riastrad if (ret) 1318 1.1 riastrad goto out_no_map; 1319 1.1 riastrad 1320 1.1 riastrad man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy); 1321 1.1 riastrad } 1322 1.1 riastrad 1323 1.1 riastrad man->size = size; 1324 1.1 riastrad drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT); 1325 1.1 riastrad 1326 1.1 riastrad man->has_pool = true; 1327 1.1 riastrad 1328 1.1 riastrad /* 1329 1.1 riastrad * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to 1330 1.1 riastrad * prevent deadlocks from happening when vmw_cmdbuf_space_pool() 1331 1.1 riastrad * needs to wait for space and we block on further command 1332 1.1 riastrad * submissions to be able to free up space. 1333 1.1 riastrad */ 1334 1.1 riastrad man->default_size = VMW_CMDBUF_INLINE_SIZE; 1335 1.1 riastrad DRM_INFO("Using command buffers with %s pool.\n", 1336 1.1 riastrad (man->using_mob) ? "MOB" : "DMA"); 1337 1.1 riastrad 1338 1.1 riastrad return 0; 1339 1.1 riastrad 1340 1.1 riastrad out_no_map: 1341 1.3 riastrad if (man->using_mob) { 1342 1.3 riastrad ttm_bo_put(man->cmd_space); 1343 1.3 riastrad man->cmd_space = NULL; 1344 1.3 riastrad } 1345 1.1 riastrad 1346 1.1 riastrad return ret; 1347 1.1 riastrad } 1348 1.1 riastrad 1349 1.1 riastrad /** 1350 1.1 riastrad * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for 1351 1.1 riastrad * inline command buffer submissions only. 1352 1.1 riastrad * 1353 1.1 riastrad * @dev_priv: Pointer to device private structure. 1354 1.1 riastrad * 1355 1.1 riastrad * Returns a pointer to a cummand buffer manager to success or error pointer 1356 1.1 riastrad * on failure. The command buffer manager will be enabled for submissions of 1357 1.1 riastrad * size VMW_CMDBUF_INLINE_SIZE only. 1358 1.1 riastrad */ 1359 1.1 riastrad struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv) 1360 1.1 riastrad { 1361 1.1 riastrad struct vmw_cmdbuf_man *man; 1362 1.1 riastrad struct vmw_cmdbuf_context *ctx; 1363 1.3 riastrad unsigned int i; 1364 1.1 riastrad int ret; 1365 1.1 riastrad 1366 1.1 riastrad if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS)) 1367 1.1 riastrad return ERR_PTR(-ENOSYS); 1368 1.1 riastrad 1369 1.1 riastrad man = kzalloc(sizeof(*man), GFP_KERNEL); 1370 1.1 riastrad if (!man) 1371 1.1 riastrad return ERR_PTR(-ENOMEM); 1372 1.1 riastrad 1373 1.3 riastrad man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ? 1374 1.3 riastrad 2 : 1; 1375 1.1 riastrad man->headers = dma_pool_create("vmwgfx cmdbuf", 1376 1.7 riastrad #ifdef __NetBSD__ 1377 1.7 riastrad dev_priv->dev->dmat, 1378 1.7 riastrad #else 1379 1.1 riastrad &dev_priv->dev->pdev->dev, 1380 1.7 riastrad #endif 1381 1.1 riastrad sizeof(SVGACBHeader), 1382 1.1 riastrad 64, PAGE_SIZE); 1383 1.1 riastrad if (!man->headers) { 1384 1.1 riastrad ret = -ENOMEM; 1385 1.1 riastrad goto out_no_pool; 1386 1.1 riastrad } 1387 1.1 riastrad 1388 1.1 riastrad man->dheaders = dma_pool_create("vmwgfx inline cmdbuf", 1389 1.7 riastrad #ifdef __NetBSD__ 1390 1.7 riastrad dev_priv->dev->dmat, 1391 1.7 riastrad #else 1392 1.1 riastrad &dev_priv->dev->pdev->dev, 1393 1.7 riastrad #endif 1394 1.1 riastrad sizeof(struct vmw_cmdbuf_dheader), 1395 1.1 riastrad 64, PAGE_SIZE); 1396 1.1 riastrad if (!man->dheaders) { 1397 1.1 riastrad ret = -ENOMEM; 1398 1.1 riastrad goto out_no_dpool; 1399 1.1 riastrad } 1400 1.1 riastrad 1401 1.1 riastrad for_each_cmdbuf_ctx(man, i, ctx) 1402 1.1 riastrad vmw_cmdbuf_ctx_init(ctx); 1403 1.1 riastrad 1404 1.1 riastrad INIT_LIST_HEAD(&man->error); 1405 1.1 riastrad spin_lock_init(&man->lock); 1406 1.1 riastrad mutex_init(&man->cur_mutex); 1407 1.1 riastrad mutex_init(&man->space_mutex); 1408 1.3 riastrad mutex_init(&man->error_mutex); 1409 1.1 riastrad man->default_size = VMW_CMDBUF_INLINE_SIZE; 1410 1.6 riastrad DRM_INIT_WAITQUEUE(&man->alloc_queue, "vmwgfxaq"); 1411 1.6 riastrad DRM_INIT_WAITQUEUE(&man->idle_queue, "vmwgfxiq"); 1412 1.1 riastrad man->dev_priv = dev_priv; 1413 1.1 riastrad man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1; 1414 1.1 riastrad INIT_WORK(&man->work, &vmw_cmdbuf_work_func); 1415 1.1 riastrad vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR, 1416 1.1 riastrad &dev_priv->error_waiters); 1417 1.3 riastrad ret = vmw_cmdbuf_startstop(man, 0, true); 1418 1.1 riastrad if (ret) { 1419 1.3 riastrad DRM_ERROR("Failed starting command buffer contexts\n"); 1420 1.1 riastrad vmw_cmdbuf_man_destroy(man); 1421 1.1 riastrad return ERR_PTR(ret); 1422 1.1 riastrad } 1423 1.1 riastrad 1424 1.1 riastrad return man; 1425 1.1 riastrad 1426 1.1 riastrad out_no_dpool: 1427 1.1 riastrad dma_pool_destroy(man->headers); 1428 1.1 riastrad out_no_pool: 1429 1.1 riastrad kfree(man); 1430 1.1 riastrad 1431 1.1 riastrad return ERR_PTR(ret); 1432 1.1 riastrad } 1433 1.1 riastrad 1434 1.1 riastrad /** 1435 1.1 riastrad * vmw_cmdbuf_remove_pool - Take down the main buffer space pool. 1436 1.1 riastrad * 1437 1.1 riastrad * @man: Pointer to a command buffer manager. 1438 1.1 riastrad * 1439 1.1 riastrad * This function removes the main buffer space pool, and should be called 1440 1.1 riastrad * before MOB memory management is removed. When this function has been called, 1441 1.1 riastrad * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or 1442 1.1 riastrad * less are allowed, and the default size of the command buffer for small kernel 1443 1.1 riastrad * submissions is also set to this size. 1444 1.1 riastrad */ 1445 1.1 riastrad void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man) 1446 1.1 riastrad { 1447 1.1 riastrad if (!man->has_pool) 1448 1.1 riastrad return; 1449 1.1 riastrad 1450 1.1 riastrad man->has_pool = false; 1451 1.1 riastrad man->default_size = VMW_CMDBUF_INLINE_SIZE; 1452 1.1 riastrad (void) vmw_cmdbuf_idle(man, false, 10*HZ); 1453 1.1 riastrad if (man->using_mob) { 1454 1.1 riastrad (void) ttm_bo_kunmap(&man->map_obj); 1455 1.3 riastrad ttm_bo_put(man->cmd_space); 1456 1.3 riastrad man->cmd_space = NULL; 1457 1.1 riastrad } else { 1458 1.5 riastrad #ifdef __NetBSD__ 1459 1.5 riastrad const bus_dma_tag_t dmat = man->dev_priv->dev->dmat; 1460 1.5 riastrad bus_dmamap_unload(dmat, man->dmamap); 1461 1.5 riastrad bus_dmamem_unmap(dmat, man->map, man->size); 1462 1.5 riastrad bus_dmamem_free(dmat, &man->dmaseg, 1); 1463 1.5 riastrad bus_dmamap_destroy(dmat, man->dmamap); 1464 1.5 riastrad #else 1465 1.1 riastrad dma_free_coherent(&man->dev_priv->dev->pdev->dev, 1466 1.1 riastrad man->size, man->map, man->handle); 1467 1.5 riastrad #endif 1468 1.1 riastrad } 1469 1.1 riastrad } 1470 1.1 riastrad 1471 1.1 riastrad /** 1472 1.1 riastrad * vmw_cmdbuf_man_destroy - Take down a command buffer manager. 1473 1.1 riastrad * 1474 1.1 riastrad * @man: Pointer to a command buffer manager. 1475 1.1 riastrad * 1476 1.1 riastrad * This function idles and then destroys a command buffer manager. 1477 1.1 riastrad */ 1478 1.1 riastrad void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man) 1479 1.1 riastrad { 1480 1.1 riastrad WARN_ON_ONCE(man->has_pool); 1481 1.1 riastrad (void) vmw_cmdbuf_idle(man, false, 10*HZ); 1482 1.3 riastrad 1483 1.3 riastrad if (vmw_cmdbuf_startstop(man, 0, false)) 1484 1.3 riastrad DRM_ERROR("Failed stopping command buffer contexts.\n"); 1485 1.1 riastrad 1486 1.1 riastrad vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR, 1487 1.1 riastrad &man->dev_priv->error_waiters); 1488 1.1 riastrad (void) cancel_work_sync(&man->work); 1489 1.1 riastrad dma_pool_destroy(man->dheaders); 1490 1.1 riastrad dma_pool_destroy(man->headers); 1491 1.6 riastrad DRM_DESTROY_WAITQUEUE(&man->idle_queue); 1492 1.6 riastrad DRM_DESTROY_WAITQUEUE(&man->alloc_queue); 1493 1.1 riastrad mutex_destroy(&man->cur_mutex); 1494 1.1 riastrad mutex_destroy(&man->space_mutex); 1495 1.3 riastrad mutex_destroy(&man->error_mutex); 1496 1.6 riastrad spin_lock_destroy(&man->lock); 1497 1.1 riastrad kfree(man); 1498 1.1 riastrad } 1499