
Lines Matching defs:man (only in vmwgfx)

152  * @man: The command buffer manager.
165 struct vmw_cmdbuf_man *man;
207 static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
209 static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
214 * @man: The range manager.
217 static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
220 if (mutex_lock_interruptible(&man->cur_mutex))
223 mutex_lock(&man->cur_mutex);
232 * @man: The range manager.
234 static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
236 mutex_unlock(&man->cur_mutex);
255 dma_pool_free(header->man->dheaders, dheader, header->handle);
265 * For internal use. Must be called with man::lock held.
269 struct vmw_cmdbuf_man *man = header->man;
271 lockdep_assert_held_once(&man->lock);
279 DRM_SPIN_WAKEUP_ALL(&man->alloc_queue, &man->lock); /* XXX */
281 dma_pool_free(man->headers, header->cb_header,
294 struct vmw_cmdbuf_man *man = header->man;
301 spin_lock(&man->lock);
303 spin_unlock(&man->lock);
314 struct vmw_cmdbuf_man *man = header->man;
318 vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
322 vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
344 * @man: The command buffer manager.
350 static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
353 while (ctx->num_hw_submitted < man->max_hw_submitted &&
381 * @man: The command buffer manager.
388 static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
394 assert_spin_locked(&man->lock);
396 vmw_cmdbuf_ctx_submit(man, ctx);
405 DRM_SPIN_WAKEUP_ONE(&man->idle_queue, &man->lock);
414 list_add_tail(&entry->list, &man->error);
415 schedule_work(&man->work);
432 vmw_cmdbuf_ctx_submit(man, ctx);
441 * @man: The command buffer manager.
447 static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
453 assert_spin_locked(&man->lock);
457 for_each_cmdbuf_ctx(man, i, ctx)
458 vmw_cmdbuf_ctx_process(man, ctx, &notempty);
460 if (man->irq_on && !notempty) {
461 vmw_generic_waiter_remove(man->dev_priv,
463 &man->dev_priv->cmdbuf_waiters);
464 man->irq_on = false;
465 } else if (!man->irq_on && notempty) {
466 vmw_generic_waiter_add(man->dev_priv,
468 &man->dev_priv->cmdbuf_waiters);
469 man->irq_on = true;
480 * @man: The command buffer manager.
487 * @man->lock needs to be held when calling this function.
489 static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
496 list_add_tail(&header->list, &man->ctx[cb_context].submitted);
498 vmw_cmdbuf_man_process(man);
505 * @man: Pointer to the command buffer manager.
511 void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
513 spin_lock(&man->lock);
514 vmw_cmdbuf_man_process(man);
515 spin_unlock(&man->lock);
529 struct vmw_cmdbuf_man *man =
539 for_each_cmdbuf_ctx(man, i, ctx)
542 mutex_lock(&man->error_mutex);
543 spin_lock(&man->lock);
544 list_for_each_entry_safe(entry, next, &man->error, list) {
578 if (man->using_mob)
591 for_each_cmdbuf_ctx(man, i, ctx)
592 man->ctx[i].block_submission = true;
594 spin_unlock(&man->lock);
597 if (global_block && vmw_cmdbuf_preempt(man, 0))
600 spin_lock(&man->lock);
601 for_each_cmdbuf_ctx(man, i, ctx) {
603 vmw_cmdbuf_ctx_process(man, ctx, &dummy);
620 vmw_cmdbuf_man_process(man);
621 spin_unlock(&man->lock);
623 if (global_block && vmw_cmdbuf_startstop(man, 0, true))
628 vmw_fifo_send_fence(man->dev_priv, &dummy);
629 spin_lock(&man->lock);
630 DRM_SPIN_WAKEUP_ALL(&man->idle_queue, &man->lock);
631 spin_unlock(&man->lock);
634 mutex_unlock(&man->error_mutex);
640 * @man: The command buffer manager.
644 static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
651 assert_spin_locked(&man->lock);
653 vmw_cmdbuf_man_process(man);
654 for_each_cmdbuf_ctx(man, i, ctx) {
661 idle = list_empty(&man->error);
671 * @man: The command buffer manager.
674 * is automatically allocated when needed. Call with @man->cur_mutex held.
676 static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
678 struct vmw_cmdbuf_header *cur = man->cur;
680 lockdep_assert_held_once(&man->cur_mutex);
685 spin_lock(&man->lock);
686 if (man->cur_pos == 0) {
691 man->cur->cb_header->length = man->cur_pos;
692 vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
694 spin_unlock(&man->lock);
695 man->cur = NULL;
696 man->cur_pos = 0;
703 * @man: The command buffer manager.
709 int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
712 int ret = vmw_cmdbuf_cur_lock(man, interruptible);
717 __vmw_cmdbuf_cur_flush(man);
718 vmw_cmdbuf_cur_unlock(man);
726 * @man: The command buffer manager.
734 int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
739 ret = vmw_cmdbuf_cur_flush(man, interruptible);
740 spin_lock(&man->lock);
741 vmw_generic_waiter_add(man->dev_priv,
743 &man->dev_priv->cmdbuf_waiters);
745 DRM_SPIN_TIMED_WAIT_UNTIL(ret, &man->idle_queue, &man->lock,
746 timeout, vmw_cmdbuf_man_idle(man, true));
748 DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret, &man->idle_queue,
749 &man->lock,
750 timeout, vmw_cmdbuf_man_idle(man, true));
752 vmw_generic_waiter_remove(man->dev_priv,
754 &man->dev_priv->cmdbuf_waiters);
756 if (!vmw_cmdbuf_man_idle(man, true))
761 spin_unlock(&man->lock);
771 * @man: The command buffer manager.
778 static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
787 spin_lock(&man->lock);
788 ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
790 vmw_cmdbuf_man_process(man);
791 ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
794 spin_unlock(&man->lock);
803 * @man: The command buffer manager.
812 static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
828 if (mutex_lock_interruptible(&man->space_mutex))
831 mutex_lock(&man->space_mutex);
833 spin_lock(&man->lock);
836 if (vmw_cmdbuf_try_alloc(man, &info))
839 vmw_generic_waiter_add(man->dev_priv,
841 &man->dev_priv->cmdbuf_waiters);
846 DRM_SPIN_WAIT_UNTIL(ret, &man->alloc_queue, &man->lock,
847 vmw_cmdbuf_try_alloc(man, &info));
850 (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
851 &man->dev_priv->cmdbuf_waiters);
852 spin_unlock(&man->lock);
853 mutex_unlock(&man->space_mutex);
859 DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &man->alloc_queue, &man->lock,
860 vmw_cmdbuf_try_alloc(man, &info));
863 vmw_generic_waiter_remove(man->dev_priv,
865 &man->dev_priv->cmdbuf_waiters);
868 spin_unlock(&man->lock);
869 mutex_unlock(&man->space_mutex);
878 * @man: The command buffer manager.
883 static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
892 if (!man->has_pool)
895 ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
900 header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
910 header->cmd = man->map + offset;
911 if (man->using_mob) {
913 cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
916 cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
922 spin_lock(&man->lock);
924 spin_unlock(&man->lock);
933 * @man: The command buffer manager.
937 static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
947 dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
969 * @man: The command buffer manager.
978 void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
992 ret = vmw_cmdbuf_space_inline(man, header, size);
994 ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
1001 header->man = man;
1013 * @man: The command buffer manager.
1021 static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
1029 if (vmw_cmdbuf_cur_lock(man, interruptible))
1032 cur = man->cur;
1033 if (cur && (size + man->cur_pos > cur->size ||
1036 __vmw_cmdbuf_cur_flush(man);
1038 if (!man->cur) {
1039 ret = vmw_cmdbuf_alloc(man,
1040 max_t(size_t, size, man->default_size),
1041 interruptible, &man->cur);
1043 vmw_cmdbuf_cur_unlock(man);
1047 cur = man->cur;
1057 return (void *) (man->cur->cmd + man->cur_pos);
1063 * @man: The command buffer manager.
1067 static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
1070 struct vmw_cmdbuf_header *cur = man->cur;
1072 lockdep_assert_held_once(&man->cur_mutex);
1075 man->cur_pos += size;
1079 __vmw_cmdbuf_cur_flush(man);
1080 vmw_cmdbuf_cur_unlock(man);
1086 * @man: The command buffer manager.
1096 void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
1101 return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
1118 * @man: The command buffer manager.
1124 void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1128 vmw_cmdbuf_commit_cur(man, size, flush);
1132 (void) vmw_cmdbuf_cur_lock(man, false);
1133 __vmw_cmdbuf_cur_flush(man);
1135 man->cur = header;
1136 man->cur_pos = size;
1140 __vmw_cmdbuf_cur_flush(man);
1141 vmw_cmdbuf_cur_unlock(man);
1148 * @man: The command buffer manager.
1154 man,
1160 void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
1168 spin_lock(&man->lock);
1170 spin_unlock(&man->lock);
1186 * @man: The command buffer manager.
1190 static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
1201 return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1209 * @man: The command buffer manager.
1214 static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
1226 return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1232 * @man: The command buffer manager.
1244 int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
1247 struct vmw_private *dev_priv = man->dev_priv;
1251 if (man->has_pool)
1261 0, BUS_DMA_ALLOCNOW|BUS_DMA_WAITOK, &man->dmamap);
1265 &man->dmaseg, 1, &nseg, BUS_DMA_WAITOK);
1270 error = bus_dmamem_map(dev_priv->dev->dmat, &man->dmaseg, 1,
1271 size, (void *)&man->map, BUS_DMA_COHERENT|BUS_DMA_WAITOK);
1275 error = bus_dmamap_load(dev_priv->dev->dmat, man->dmamap,
1276 man->map, size, NULL, BUS_DMA_WAITOK);
1283 bus_dmamap_unload(dev_priv->dev->dmat, man->dmamap);
1285 bus_dmamem_unmap(dev_priv->dev->dmat, man->map, size);
1287 bus_dmamem_free(dev_priv->dev->dmat, &man->dmaseg, 1);
1288 if (man->dmamap)
1289 bus_dmamap_destroy(dev_priv->dev->dmat, man->dmamap);
1290 man->map = NULL;
1293 man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
1294 &man->handle, GFP_KERNEL);
1296 if (man->map) {
1297 man->using_mob = false;
1310 &man->cmd_space);
1314 man->using_mob = true;
1315 ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
1316 &man->map_obj);
1320 man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
1323 man->size = size;
1324 drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
1326 man->has_pool = true;
1334 man->default_size = VMW_CMDBUF_INLINE_SIZE;
1336 (man->using_mob) ? "MOB" : "DMA");
1341 if (man->using_mob) {
1342 ttm_bo_put(man->cmd_space);
1343 man->cmd_space = NULL;
1361 struct vmw_cmdbuf_man *man;
1369 man = kzalloc(sizeof(*man), GFP_KERNEL);
1370 if (!man)
1373 man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
1375 man->headers = dma_pool_create("vmwgfx cmdbuf",
1383 if (!man->headers) {
1388 man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
1396 if (!man->dheaders) {
1401 for_each_cmdbuf_ctx(man, i, ctx)
1404 INIT_LIST_HEAD(&man->error);
1405 spin_lock_init(&man->lock);
1406 mutex_init(&man->cur_mutex);
1407 mutex_init(&man->space_mutex);
1408 mutex_init(&man->error_mutex);
1409 man->default_size = VMW_CMDBUF_INLINE_SIZE;
1410 DRM_INIT_WAITQUEUE(&man->alloc_queue, "vmwgfxaq");
1411 DRM_INIT_WAITQUEUE(&man->idle_queue, "vmwgfxiq");
1412 man->dev_priv = dev_priv;
1413 man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
1414 INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
1417 ret = vmw_cmdbuf_startstop(man, 0, true);
1420 vmw_cmdbuf_man_destroy(man);
1424 return man;
1427 dma_pool_destroy(man->headers);
1429 kfree(man);
1437 * @man: Pointer to a command buffer manager.
1445 void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
1447 if (!man->has_pool)
1450 man->has_pool = false;
1451 man->default_size = VMW_CMDBUF_INLINE_SIZE;
1452 (void) vmw_cmdbuf_idle(man, false, 10*HZ);
1453 if (man->using_mob) {
1454 (void) ttm_bo_kunmap(&man->map_obj);
1455 ttm_bo_put(man->cmd_space);
1456 man->cmd_space = NULL;
1459 const bus_dma_tag_t dmat = man->dev_priv->dev->dmat;
1460 bus_dmamap_unload(dmat, man->dmamap);
1461 bus_dmamem_unmap(dmat, man->map, man->size);
1462 bus_dmamem_free(dmat, &man->dmaseg, 1);
1463 bus_dmamap_destroy(dmat, man->dmamap);
1465 dma_free_coherent(&man->dev_priv->dev->pdev->dev,
1466 man->size, man->map, man->handle);
1474 * @man: Pointer to a command buffer manager.
1478 void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
1480 WARN_ON_ONCE(man->has_pool);
1481 (void) vmw_cmdbuf_idle(man, false, 10*HZ);
1483 if (vmw_cmdbuf_startstop(man, 0, false))
1486 vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
1487 &man->dev_priv->error_waiters);
1488 (void) cancel_work_sync(&man->work);
1489 dma_pool_destroy(man->dheaders);
1490 dma_pool_destroy(man->headers);
1491 DRM_DESTROY_WAITQUEUE(&man->idle_queue);
1492 DRM_DESTROY_WAITQUEUE(&man->alloc_queue);
1493 mutex_destroy(&man->cur_mutex);
1494 mutex_destroy(&man->space_mutex);
1495 mutex_destroy(&man->error_mutex);
1496 spin_lock_destroy(&man->lock);
1497 kfree(man);
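
The matches above walk the command buffer manager from its locking helpers (cur_lock/cur_unlock), through space allocation and submission, to pool setup and teardown. As a rough guide to how the reserve/commit pair matched at lines 1096 and 1124 is meant to be driven, here is a minimal sketch. It is not an excerpt from the file: the full prototypes, the ERR_PTR return convention, the SVGA_CB_CONTEXT_0 fallback value, and the example_emit_command()/payload names are reconstructions and placeholders based on the fragments listed above.

    /*
     * Hedged sketch, not part of the listing: typical use of the
     * reserve/commit pair matched at lines 1096 and 1124.  The
     * prototypes below are reconstructed from the fragments above
     * (ctx_id/interruptible at 1101, header/flush at 1128 and 1135);
     * example_emit_command() and `payload` are hypothetical.
     */
    #include <linux/err.h>
    #include <linux/string.h>
    #include <linux/types.h>

    #ifndef SVGA_CB_CONTEXT_0
    #define SVGA_CB_CONTEXT_0 0     /* assumed value; the real enum lives in the SVGA headers */
    #endif

    struct vmw_cmdbuf_man;
    struct vmw_cmdbuf_header;

    void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
                             int ctx_id, bool interruptible,
                             struct vmw_cmdbuf_header *header);
    void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
                           struct vmw_cmdbuf_header *header, bool flush);

    static int example_emit_command(struct vmw_cmdbuf_man *man,
                                    const void *payload, size_t size)
    {
            /*
             * header == NULL selects the manager's shared "cur" buffer,
             * which vmw_cmdbuf_reserve_cur() (line 1021) grows on demand.
             */
            void *cmd = vmw_cmdbuf_reserve(man, size, SVGA_CB_CONTEXT_0,
                                           true, NULL);

            if (IS_ERR(cmd))
                    return PTR_ERR(cmd);

            memcpy(cmd, payload, size);

            /*
             * Commit exactly what was written; flush == false lets the
             * manager keep batching until __vmw_cmdbuf_cur_flush()
             * (line 676) hands the buffer to vmw_cmdbuf_ctx_add()
             * (line 489) for submission on SVGA_CB_CONTEXT_0.
             */
            vmw_cmdbuf_commit(man, size, NULL, false);
            return 0;
    }

Passing a dedicated header (obtained via vmw_cmdbuf_alloc(), line 978) instead of NULL appears to give the caller its own buffer rather than space in the shared one, as the header handling at lines 1132-1141 suggests.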