Lines matching refs: rq

292 	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
293 	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
295 assert(bo->rq == NULL);
632 	DBG(("%s: retiring bo handle=%d (needed flush? %d), rq? %d [busy?=%d]\n",
633 	     __FUNCTION__, bo->handle, bo->needs_flush, bo->rq != NULL,
638 if (bo->rq) {
652 	DBG(("%s: retiring bo handle=%d (needed flush? %d), rq? %d [busy?=%d]\n",
653 	     __FUNCTION__, bo->handle, bo->needs_flush, bo->rq != NULL,
658 if (bo->rq) {
871 struct kgem_request *rq;
873 rq = __kgem_freed_request;
874 if (rq) {
875 __kgem_freed_request = *(struct kgem_request **)rq;
877 rq = malloc(sizeof(*rq));
878 if (rq == NULL)
879 rq = &kgem->static_request;
882 list_init(&rq->buffers);
883 rq->bo = NULL;
884 rq->ring = 0;
886 return rq;
889 static void __kgem_request_free(struct kgem_request *rq)
891 _list_del(&rq->list);
893 free(rq);
895 *(struct kgem_request **)rq = __kgem_freed_request;
896 __kgem_freed_request = rq;
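
The request allocator at lines 871-896 uses an intrusive free list: a freed request's own first word is overlaid with the pointer to the next free entry, so caching costs no extra storage. A sketch of the pattern as the listed lines suggest it (the plain free() at line 893 appears to be an alternate, cache-disabled path, collapsed into a comment here):

static struct kgem_request *__kgem_freed_request;

static struct kgem_request *__kgem_request_alloc(struct kgem *kgem)
{
	struct kgem_request *rq;

	rq = __kgem_freed_request;
	if (rq) {
		/* Pop: the first word of a freed request is the next link. */
		__kgem_freed_request = *(struct kgem_request **)rq;
	} else {
		rq = malloc(sizeof(*rq));
		if (rq == NULL)
			rq = &kgem->static_request; /* last-ditch fallback */
	}

	list_init(&rq->buffers);
	rq->bo = NULL;
	rq->ring = 0;

	return rq;
}

static void __kgem_request_free(struct kgem_request *rq)
{
	_list_del(&rq->list);
	/* Line 893 shows a variant that simply free()s; the cached path
	 * pushes the request onto the free list instead. */
	*(struct kgem_request **)rq = __kgem_freed_request;
	__kgem_freed_request = rq;
}

Lines 3873-3875 later drain this list one entry at a time during cleanup, with the same *(struct kgem_request **)rq pop before each free().
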
2037 bo->rq = MAKE_REQUEST(kgem->next_request, kgem->ring);
2085 assert(!bo->snoop || bo->rq == NULL);
2096 assert(bo->rq == NULL);
2145 assert(bo->rq == NULL);
2223 assert(bo->rq == NULL);
2238 assert(bo->rq != NULL);
2239 if (RQ(bo->rq) == (void *)kgem) {
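
MAKE_REQUEST, RQ and RQ_RING (first seen at line 2037, last at line 6115) read as a tagged-pointer scheme: struct kgem_request is at least 4-byte aligned, so the low bits of bo->rq are free to carry the ring the buffer was queued on, and RQ() masks them back off. A plausible reconstruction; the listing shows only the uses, not the definitions:

#include <stdint.h>

struct kgem_request;

/* Assumed encoding: two low pointer bits hold the ring tag. */
#define RQ(rq)        ((struct kgem_request *)((uintptr_t)(rq) & ~3))
#define RQ_RING(rq)   ((uintptr_t)(rq) & 3)
#define MAKE_REQUEST(rq, ring) \
	((struct kgem_request *)((uintptr_t)(rq) | (ring)))

Under this reading, bo->rq == NULL means idle, RQ(bo->rq) == (void *)kgem (line 2239) marks a buffer busy on no specific request, and line 2037 stamps a buffer with both the pending request and the current ring in a single word.
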
2302 	     __FUNCTION__, bo->handle, bo->delta, bo->rq != NULL));
2303 if (bo->rq)
2334 assert(bo->rq == NULL);
2390 assert(bo->rq == NULL);
2435 assert(RQ(bo->rq) == kgem->next_request);
2469 assert(RQ(a->rq) == kgem->next_request);
2472 assert(RQ(b->rq) == kgem->next_request);
2508 if (bo->exec == NULL && bo->rq && !__kgem_busy(kgem, bo->handle))
2510 if (bo->rq == NULL)
2541 if (bo->rq && bo->exec == NULL && !__kgem_busy(kgem, bo->handle))
2544 if (bo->rq) {
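
Lines 2508-2544 repeat a lazy idling idiom: rq set while exec is clear means the buffer has been submitted and may have since completed, so a cheap busy poll can retire it before anyone is forced to wait. Roughly, with __kgem_bo_clear_busy standing in for whatever the source runs after a negative poll (an assumption):

/* Rough shape of the checks at lines 2508 and 2541; the helper name
 * __kgem_bo_clear_busy is assumed, not shown in the listing. */
if (bo->rq && bo->exec == NULL && !__kgem_busy(kgem, bo->handle))
	__kgem_bo_clear_busy(bo);

if (bo->rq == NULL)
	return false;	/* confirmed idle */
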
2613 	     __FUNCTION__, bo->base.handle, bo->base.rq != NULL, bo->base.exec != NULL));
2615 assert(bo->base.exec == NULL || RQ(bo->base.rq) == kgem->next_request);
2616 if (bo->base.rq)
2633 assert(RQ(bo->rq) == (void *)kgem);
2660 static bool __kgem_retire_rq(struct kgem *kgem, struct kgem_request *rq)
2665 	     __FUNCTION__, rq->bo->handle));
2666 assert(RQ(rq->bo->rq) == rq);
2668 if (rq == kgem->fence[rq->ring])
2669 kgem->fence[rq->ring] = NULL;
2671 while (!list_is_empty(&rq->buffers)) {
2674 bo = list_first_entry(&rq->buffers,
2678 assert(RQ(bo->rq) == rq);
2690 bo->rq = MAKE_REQUEST(kgem, RQ_RING(bo->rq));
2697 bo->rq = NULL;
2704 assert(rq->bo->rq == NULL);
2705 assert(rq->bo->exec == NULL);
2706 assert(list_is_empty(&rq->bo->request));
2707 assert(rq->bo->refcnt > 0);
2709 if (--rq->bo->refcnt == 0) {
2710 if (kgem_bo_set_purgeable(kgem, rq->bo)) {
2711 kgem_bo_move_to_inactive(kgem, rq->bo);
2715 	     __FUNCTION__, rq->bo->handle));
2716 kgem_bo_free(kgem, rq->bo);
2720 __kgem_request_free(rq);
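
Lines 2660-2720 give nearly the whole retirement routine: invalidate the fence hint if it points at this request, unlink each buffer and either re-tag it as busy-without-request (line 2690) or clear its rq entirely, then release the batch bo and recycle the request. A condensed sketch; the needs_flush test and the move onto a flushing list are assumptions built around line 2690, and the cache moves for idle buffers are elided:

static bool __kgem_retire_rq(struct kgem *kgem, struct kgem_request *rq)
{
	bool retired = false;

	if (rq == kgem->fence[rq->ring])
		kgem->fence[rq->ring] = NULL;	/* drop the stale hint */

	while (!list_is_empty(&rq->buffers)) {
		struct kgem_bo *bo = list_first_entry(&rq->buffers,
						      struct kgem_bo, request);
		assert(RQ(bo->rq) == rq);
		list_del(&bo->request);

		if (bo->needs_flush && __kgem_busy(kgem, bo->handle)) {
			/* Still busy after its request completed: keep the
			 * ring tag, but point at the kgem sentinel. */
			bo->rq = MAKE_REQUEST(kgem, RQ_RING(bo->rq));
			list_add(&bo->request, &kgem->flushing);
			continue;
		}

		bo->rq = NULL;			/* idle */
		retired = true;
		/* ...unreferenced buffers move to the inactive cache... */
	}

	/* The batch bo itself is the last holder of the request. */
	assert(rq->bo->rq == NULL);
	assert(rq->bo->exec == NULL);
	if (--rq->bo->refcnt == 0) {
		if (kgem_bo_set_purgeable(kgem, rq->bo))
			kgem_bo_move_to_inactive(kgem, rq->bo);
		else
			kgem_bo_free(kgem, rq->bo);
	}

	__kgem_request_free(rq);
	return retired;
}
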
2729 struct kgem_request *rq;
2731 rq = list_first_entry(&kgem->requests[ring],
2734 assert(rq->ring == ring);
2735 if (__kgem_busy(kgem, rq->bo->handle))
2738 retired |= __kgem_retire_rq(kgem, rq);
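
Lines 2729-2738 show the per-ring retire walk, and why it only ever probes the queue head: requests on a ring complete in submission order, so the first still-busy batch bounds everything queued after it. Reconstructed loop (the enclosing function name is an assumption):

static bool kgem_retire__requests_ring(struct kgem *kgem, int ring)
{
	bool retired = false;

	while (!list_is_empty(&kgem->requests[ring])) {
		struct kgem_request *rq;

		rq = list_first_entry(&kgem->requests[ring],
				      struct kgem_request, list);
		assert(rq->ring == ring);
		if (__kgem_busy(kgem, rq->bo->handle))
			break;	/* everything behind this is busy too */

		retired |= __kgem_retire_rq(kgem, rq);
	}

	return retired;
}
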
2797 struct kgem_request *rq;
2802 rq = kgem->fence[ring];
2803 if (rq) {
2806 if (__kgem_busy(kgem, rq->bo->handle)) {
2808 	     __FUNCTION__, rq->bo->handle));
2818 } while (tmp != rq);
2825 rq = list_last_entry(&kgem->requests[ring],
2827 assert(rq->ring == ring);
2828 if (__kgem_busy(kgem, rq->bo->handle)) {
2830 	     __FUNCTION__, rq->bo->handle));
2831 kgem->fence[ring] = rq;
2836 	     __FUNCTION__, ring, rq->bo->handle));
2839 rq = list_first_entry(&kgem->requests[ring],
2842 assert(rq->ring == ring);
2843 __kgem_retire_rq(kgem, rq);
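
Lines 2797-2843 use kgem->fence[ring] as a memoised busy marker: it records the newest request last seen busy, so an idleness poll costs a single busy check instead of a queue walk, and once that request signals, everything queued before it can be retired without further ioctls. Reconstructed shape (the function name is an assumption):

static bool __kgem_ring_is_idle(struct kgem *kgem, int ring)
{
	struct kgem_request *rq;

	rq = kgem->fence[ring];
	if (rq) {
		struct kgem_request *tmp;

		/* The cached fence is the newest request seen busy; if it
		 * is still busy, nothing older can have retired either. */
		if (__kgem_busy(kgem, rq->bo->handle))
			return false;

		/* Fence signalled: everything up to it is complete. */
		do {
			tmp = list_first_entry(&kgem->requests[ring],
					       struct kgem_request, list);
			assert(tmp->ring == ring);
			__kgem_retire_rq(kgem, tmp);
		} while (tmp != rq);

		return true;
	}

	rq = list_last_entry(&kgem->requests[ring],
			     struct kgem_request, list);
	if (__kgem_busy(kgem, rq->bo->handle)) {
		kgem->fence[ring] = rq;		/* memoise for the next poll */
		return false;
	}

	/* Whole ring idle: retire every outstanding request. */
	while (!list_is_empty(&kgem->requests[ring])) {
		rq = list_first_entry(&kgem->requests[ring],
				      struct kgem_request, list);
		__kgem_retire_rq(kgem, rq);
	}

	return true;
}
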
2851 struct kgem_request *rq = bo->rq, *tmp;
2852 struct list *requests = &kgem->requests[RQ_RING(rq) == I915_EXEC_BLT];
2854 rq = RQ(rq);
2855 assert(rq != &kgem->static_request);
2856 if (rq == (struct kgem_request *)kgem) {
2863 assert(tmp->ring == rq->ring);
2865 } while (tmp != rq);
2871 struct kgem_request *rq = kgem->next_request;
2877 list_for_each_entry(bo, &rq->buffers, request) {
2880 gem_read(kgem->fd, rq->bo->handle, &value, kgem->reloc[i].offset, has_64bit ? 8 : 4);
2905 struct kgem_request *rq = kgem->next_request;
2910 list_for_each_entry_safe(bo, next, &rq->buffers, request) {
2920 assert(RQ(bo->rq) == rq || (RQ(bo->proxy->rq) == rq));
2945 if (rq == &kgem->static_request) {
2951 set_domain.handle = rq->bo->handle;
2960 assert(list_is_empty(&rq->buffers));
2962 assert(rq->bo->map__gtt == NULL);
2963 assert(rq->bo->map__wc == NULL);
2964 assert(rq->bo->map__cpu == NULL);
2965 gem_close(kgem->fd, rq->bo->handle);
2968 assert(rq->ring < ARRAY_SIZE(kgem->requests));
2969 list_add_tail(&rq->list, &kgem->requests[rq->ring]);
2972 if (kgem->fence[rq->ring] == NULL &&
2973 __kgem_busy(kgem, rq->bo->handle))
2974 kgem->fence[rq->ring] = rq;
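
Lines 2905-2974 are the post-submit commit: the emergency static_request cannot be queued for later retirement (there is only one of it), so it is synchronously drained, apparently via a blocking SET_DOMAIN on its batch, before the batch handle is closed; an ordinary request is appended to its ring's queue, seeding the fence hint if the batch is already busy. A condensed sketch; the per-buffer domain bookkeeping of the walk at line 2910 and all error handling are elided:

static void kgem_commit(struct kgem *kgem)
{
	struct kgem_request *rq = kgem->next_request;

	/* ...walk rq->buffers updating each bo's domain state... */

	if (rq == &kgem->static_request) {
		/* Untracked fallback request: force completion now with a
		 * blocking set-domain ioctl on the batch, then discard it. */
		struct drm_i915_gem_set_domain set_domain;

		memset(&set_domain, 0, sizeof(set_domain));
		set_domain.handle = rq->bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
		(void)drmIoctl(kgem->fd,
			       DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);

		assert(list_is_empty(&rq->buffers));
		gem_close(kgem->fd, rq->bo->handle);
	} else {
		/* Queue for asynchronous retirement on its ring... */
		assert(rq->ring < ARRAY_SIZE(kgem->requests));
		list_add_tail(&rq->list, &kgem->requests[rq->ring]);

		/* ...and prime the fence hint if the batch is still busy. */
		if (kgem->fence[rq->ring] == NULL &&
		    __kgem_busy(kgem, rq->bo->handle))
			kgem->fence[rq->ring] = rq;
	}

	kgem->next_request = NULL;	/* assumed: a fresh request follows */
}
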
3053 assert(bo->base.rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
3093 shrink->rq = bo->base.rq;
3100 bo->base.rq = NULL;
3138 shrink->rq = bo->base.rq;
3145 bo->base.rq = NULL;
3179 struct kgem_request *rq;
3181 rq = list_first_entry(&kgem->requests[n],
3184 assert(rq->ring == n);
3185 while (!list_is_empty(&rq->buffers)) {
3188 bo = list_first_entry(&rq->buffers,
3199 __kgem_request_free(rq);
3302 struct kgem_request *rq = kgem->next_request;
3304 while (!list_is_empty(&rq->buffers)) {
3306 list_first_entry(&rq->buffers,
3311 assert(RQ(bo->rq) == rq);
3321 bo->rq = (void *)kgem;
3326 if (bo->refcnt || bo->rq)
3332 if (rq != &kgem->static_request) {
3333 list_init(&rq->list);
3334 __kgem_request_free(rq);
3393 if (!bo->rq) {
3402 assert(RQ(bo->rq)->bo == bo);
3403 __kgem_retire_rq(kgem, RQ(bo->rq));
3412 if (!bo->rq) {
3421 __kgem_retire_rq(kgem, RQ(bo->rq));
3459 assert(bo->rq == NULL);
3577 struct kgem_request *rq;
3607 rq = kgem->next_request;
3608 assert(rq->bo == NULL);
3610 rq->bo = kgem_create_batch(kgem);
3611 if (rq->bo) {
3615 assert(!rq->bo->needs_flush);
3618 kgem->exec[i].handle = rq->bo->handle;
3622 kgem->exec[i].offset = rq->bo->presumed_offset;
3627 rq->bo->exec = &kgem->exec[i];
3628 rq->bo->rq = MAKE_REQUEST(rq, kgem->ring); /* useful sanity check */
3629 list_add(&rq->bo->request, &rq->buffers);
3630 rq->ring = kgem->ring == KGEM_BLT;
3653 set_domain.handle = rq->bo->handle;
3729 if (gem_read(kgem->fd, rq->bo->handle, kgem->batch, 0, batch_end*sizeof(uint32_t)) == 0)
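
At submit time (lines 3607-3630) the freshly created batch bo becomes both the request's bo and the final execbuffer object; tagging it via MAKE_REQUEST(rq, kgem->ring) makes the batch handle the busyness probe for the whole request, which is exactly what the fence and retire paths above poll. A sketch of the slot fill, assuming the stock drm_i915_gem_exec_object2 fields:

rq = kgem->next_request;
assert(rq->bo == NULL);

rq->bo = kgem_create_batch(kgem);
if (rq->bo) {
	int i = kgem->nexec++;	/* the batch goes last in the exec array */

	kgem->exec[i].handle = rq->bo->handle;
	kgem->exec[i].relocation_count = kgem->nreloc;		/* assumed */
	kgem->exec[i].relocs_ptr = (uintptr_t)kgem->reloc;	/* assumed */
	kgem->exec[i].offset = rq->bo->presumed_offset;

	rq->bo->exec = &kgem->exec[i];
	rq->bo->rq = MAKE_REQUEST(rq, kgem->ring); /* useful sanity check */
	list_add(&rq->bo->request, &rq->buffers);
	rq->ring = kgem->ring == KGEM_BLT;	/* 0: render, 1: blt */
}
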
3873 struct kgem_request *rq = __kgem_freed_request;
3874 __kgem_freed_request = *(struct kgem_request **)rq;
3875 free(rq);
4005 struct kgem_request *rq;
4008 rq = list_first_entry(&kgem->requests[n],
4015 set_domain.handle = rq->bo->handle;
4098 if (RQ(bo->rq) == (void *)kgem) {
4158 assert(bo->rq == NULL);
4205 assert(!!bo->rq == !!use_active);
4750 bo->rq = (void *)kgem;
4868 if (flags & CREATE_INACTIVE && bo->rq) {
4908 if (flags & CREATE_INACTIVE && bo->rq)
5074 assert(bo->rq == NULL);
5598 assert(bo->rq);
5607 __FUNCTION__, bo->handle, bo->rq != NULL));
5622 if (bo->rq)
5648 if (bo->rq == NULL || RQ_RING(bo->rq) == kgem->ring)
5760 busy &= bo->rq != NULL;
5882 if (bo->rq)
5934 busy &= bo->rq != NULL;
6015 bo->rq = MAKE_REQUEST(kgem->next_request,
6031 assert(bo->rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
6032 assert(RQ_RING(bo->rq) == kgem->ring);
6098 bo->rq = MAKE_REQUEST(kgem->next_request,
6114 assert(bo->rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
6115 assert(RQ_RING(bo->rq) == kgem->ring);
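
Lines 6015-6115 come in two near-identical runs because the relocation emitter exists in 32-bit and 64-bit variants; in both, the first reference to a buffer in the batch under construction stamps it with the pending request and the current ring, which the asserts then cross-check until submission. The stamp, roughly (the list_add is inferred from the rq->buffers walks elsewhere in this listing):

if (bo->rq == NULL) {
	/* First use in this batch: mark busy on the pending request. */
	bo->rq = MAKE_REQUEST(kgem->next_request, kgem->ring);
	list_add(&bo->request, &kgem->next_request->buffers);
}
assert(bo->rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
assert(RQ_RING(bo->rq) == kgem->ring);
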
6180 assert(bo->rq == NULL);
6611 /* Proxies are only tracked for busyness on the current rq */
6613 assert(RQ(target->rq) == kgem->next_request);
6616 bo->rq = target->rq;
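
The comment at line 6611 states the proxy rule: a proxy (a sub-range view onto another bo) carries no request history of its own; while the batch is being built it simply copies its target's tagged rq, which is why line 6613 insists the target already sits on kgem->next_request. Minimal illustration, with the surrounding condition elided:

/* Proxies are only tracked for busyness on the current rq (line 6611). */
assert(RQ(target->rq) == kgem->next_request);
bo->rq = target->rq;	/* inherit request and ring tag from the target */
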
6671 if (old->rq)
6939 (bo->base.rq == NULL ||
7070 assert(old->rq == NULL);
7346 assert(_bo->rq == NULL);