Lines Matching defs:rq

317 DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
318 bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
320 assert(bo->rq == NULL);
798 DBG(("%s: retiring bo handle=%d (needed flush? %d), rq? %d [busy?=%d]\n",
799 __FUNCTION__, bo->handle, bo->needs_flush, bo->rq != NULL,
804 if (bo->rq)
812 DBG(("%s: retiring bo handle=%d (needed flush? %d), rq? %d [busy?=%d]\n",
813 __FUNCTION__, bo->handle, bo->needs_flush, bo->rq != NULL,
818 if (bo->rq) {
1025 struct kgem_request *rq;
1028 rq = &kgem->static_request;
1030 rq = __kgem_freed_request;
1031 if (rq) {
1032 __kgem_freed_request = *(struct kgem_request **)rq;
1034 rq = malloc(sizeof(*rq));
1035 if (rq == NULL)
1036 rq = &kgem->static_request;
1040 list_init(&rq->buffers);
1041 rq->bo = NULL;
1042 rq->ring = 0;
1044 return rq;
1047 static void __kgem_request_free(struct kgem_request *rq)
1049 _list_del(&rq->list);
1051 free(rq);
1053 *(struct kgem_request **)rq = __kgem_freed_request;
1054 __kgem_freed_request = rq;
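
The allocator and free path at 1025–1054 keep freed requests on a small cache instead of returning them straight to malloc: the freelist link is stored in the first pointer-sized bytes of the freed object itself, and the allocator falls back to a static request when malloc fails. A stand-alone sketch of that caching pattern, with hypothetical names and a simplified struct (not the driver's code):

#include <stdlib.h>

struct request {
	void *bo;   /* live fields; overwritten by the freelist link once cached */
	int ring;
};

static struct request *freed_requests;   /* head of the cache of freed objects */

struct request *request_alloc(void)
{
	struct request *rq = freed_requests;

	if (rq != NULL) {
		/* Pop: the freed object holds the next cache entry in its
		 * first pointer-sized bytes. */
		freed_requests = *(struct request **)rq;
	} else {
		rq = malloc(sizeof(*rq));
		if (rq == NULL)
			return NULL;   /* the driver instead falls back to a static request */
	}

	rq->bo = NULL;
	rq->ring = 0;
	return rq;
}

void request_free(struct request *rq)
{
	/* Push: reuse the object's own storage as the freelist link. */
	*(struct request **)rq = freed_requests;
	freed_requests = rq;
}
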
1792 if (bo->rq == NULL)
1854 struct kgem_request *rq;
1856 rq = list_first_entry(&kgem->requests[ring],
1858 assert(rq->ring == ring);
1859 assert(rq->bo);
1860 assert(RQ(rq->bo->rq) == rq);
1861 if (kgem_bo_wait(kgem, rq->bo) == 0)
2493 bo->rq = MAKE_REQUEST(kgem->next_request, kgem->ring);
2556 assert(!bo->snoop || bo->rq == NULL);
2567 assert(bo->rq == NULL);
2616 assert(bo->rq == NULL);
2703 assert(bo->rq == NULL);
2719 assert(bo->rq != NULL);
2720 if (RQ(bo->rq) == (void *)kgem) {
2783 __FUNCTION__, bo->handle, bo->delta, bo->rq != NULL));
2784 if (bo->rq)
2816 assert(bo->rq == NULL);
2871 assert(bo->rq == NULL);
2916 assert(RQ(bo->rq) == kgem->next_request);
2955 assert(RQ(a->rq) == kgem->next_request);
2960 assert(RQ(b->rq) == kgem->next_request);
2997 if (bo->exec == NULL && bo->rq && !__kgem_busy(kgem, bo->handle))
2999 if (bo->rq == NULL)
3030 if (bo->rq && bo->exec == NULL && !__kgem_busy(kgem, bo->handle))
3033 if (bo->rq) {
3099 __FUNCTION__, bo->base.handle, bo->base.rq != NULL, bo->base.exec != NULL));
3101 assert(bo->base.exec == NULL || RQ(bo->base.rq) == kgem->next_request);
3102 if (bo->base.rq)
3119 assert(RQ(bo->rq) == (void *)kgem);
3168 bo->rq = MAKE_REQUEST(kgem, !!(busy.busy & ~0x1ffff));
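
Several of the matches above and below treat bo->rq as more than a plain pointer: MAKE_REQUEST() folds the ring into the pointer's low bits, RQ() masks them back off before the pointer is dereferenced, and RQ_RING() recovers the ring (compare 2493, 3168 and, later, 6656); the kgem pointer itself also appears as a sentinel for "busy, but not attached to a tracked request" (2720, 3369). A minimal sketch of this pointer-tagging idea, using hypothetical macro names and masks rather than the driver's exact definitions:

#include <assert.h>
#include <stdint.h>

/* Any struct with at least 4-byte alignment leaves its two low pointer
 * bits zero, so they are free to carry a small tag. */
struct request { long dummy; };

#define TAG_REQUEST(rq, ring) \
	((struct request *)((uintptr_t)(rq) | (uintptr_t)(ring)))
#define TAG_PTR(rq)  ((struct request *)((uintptr_t)(rq) & ~(uintptr_t)3))
#define TAG_RING(rq) ((int)((uintptr_t)(rq) & 3))

int main(void)
{
	static struct request some_request;
	struct request *tagged = TAG_REQUEST(&some_request, 1);

	assert(TAG_PTR(tagged) == &some_request);  /* untagged pointer restored */
	assert(TAG_RING(tagged) == 1);             /* ring recovered from low bits */
	return 0;
}
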
3174 static bool __kgem_retire_rq(struct kgem *kgem, struct kgem_request *rq)
3179 __FUNCTION__, rq->bo->handle));
3180 assert(RQ(rq->bo->rq) == rq);
3181 assert(rq != (struct kgem_request *)kgem);
3182 assert(rq != &kgem->static_request);
3184 if (rq == kgem->fence[rq->ring])
3185 kgem->fence[rq->ring] = NULL;
3187 while (!list_is_empty(&rq->buffers)) {
3190 bo = list_first_entry(&rq->buffers,
3194 assert(RQ(bo->rq) == rq);
3201 assert(bo != rq->bo);
3208 bo->rq = NULL;
3215 assert(rq->bo->rq == NULL);
3216 assert(rq->bo->exec == NULL);
3217 assert(list_is_empty(&rq->bo->request));
3218 assert(rq->bo->refcnt > 0);
3220 if (--rq->bo->refcnt == 0) {
3221 kgem_bo_move_to_inactive(kgem, rq->bo);
3225 __kgem_request_free(rq);
3235 struct kgem_request *rq;
3239 rq = list_first_entry(&kgem->requests[ring],
3242 assert(rq->ring == ring);
3243 assert(rq->bo);
3244 assert(RQ(rq->bo->rq) == rq);
3245 if (__kgem_busy(kgem, rq->bo->handle))
3248 retired |= __kgem_retire_rq(kgem, rq);
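
The retire path at 3174–3248 drains each ring's request list in submission order: once the kernel reports a request's batch buffer idle, every buffer that request referenced has its rq link cleared and, if no longer referenced, can be recycled, after which the request itself is released. A simplified, self-contained sketch of that loop, with hypothetical types and a stub standing in for the busy query the driver performs via ioctl:

#include <stdbool.h>
#include <stdlib.h>

struct request;

struct buffer {
	struct buffer *next;     /* link on the owning request's buffer list */
	struct request *rq;      /* request this buffer was last used by */
	int refcnt;
};

struct request {
	struct request *next;    /* next request on the ring, oldest first */
	struct buffer *batch;    /* batch buffer; only consulted for the busy check here */
	struct buffer *buffers;  /* every buffer the batch referenced */
};

/* Stand-in for the driver's busy query; here everything is reported idle
 * so the loop always drains. */
static bool gpu_is_busy(const struct buffer *bo)
{
	(void)bo;
	return false;
}

/* Retire requests in submission order; stop at the first still-busy batch. */
bool retire_ring(struct request **ring)
{
	bool retired = false;

	while (*ring != NULL) {
		struct request *rq = *ring;

		if (gpu_is_busy(rq->batch))
			break;

		/* The GPU is done: drop the request's claim on each buffer so
		 * idle, unreferenced buffers can be reused or freed. */
		while (rq->buffers != NULL) {
			struct buffer *bo = rq->buffers;

			rq->buffers = bo->next;
			bo->rq = NULL;
			if (--bo->refcnt == 0)
				free(bo);   /* the driver would move it to an inactive cache */
		}

		*ring = rq->next;
		free(rq);   /* the driver returns it to its request cache instead */
		retired = true;
	}

	return retired;
}
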
3307 struct kgem_request *rq;
3312 rq = kgem->fence[ring];
3313 if (rq) {
3316 if (__kgem_busy(kgem, rq->bo->handle)) {
3318 __FUNCTION__, rq->bo->handle));
3328 } while (tmp != rq);
3335 rq = list_last_entry(&kgem->requests[ring],
3337 assert(rq->ring == ring);
3338 assert(rq->bo);
3339 assert(RQ(rq->bo->rq) == rq);
3340 if (__kgem_busy(kgem, rq->bo->handle)) {
3342 __FUNCTION__, rq->bo->handle));
3343 kgem->fence[ring] = rq;
3348 __FUNCTION__, ring, rq->bo->handle));
3351 rq = list_first_entry(&kgem->requests[ring],
3354 assert(rq->ring == ring);
3355 __kgem_retire_rq(kgem, rq);
3363 struct kgem_request * const rq = RQ(bo->rq), *tmp;
3364 struct list *requests = &kgem->requests[rq->ring];
3366 DBG(("%s(handle=%d, ring=%d)\n", __FUNCTION__, bo->handle, rq->ring));
3368 assert(rq != &kgem->static_request);
3369 if (rq == (struct kgem_request *)kgem) {
3374 assert(rq->ring < ARRAY_SIZE(kgem->requests));
3377 assert(tmp->ring == rq->ring);
3379 } while (tmp != rq);
3381 assert(bo->needs_flush || bo->rq == NULL);
3384 return bo->rq;
3390 struct kgem_request *rq = kgem->next_request;
3396 list_for_each_entry(bo, &rq->buffers, request) {
3399 gem_read(kgem->fd, rq->bo->handle, &value, kgem->reloc[i].offset, has_64bit ? 8 : 4);
3424 struct kgem_request *rq = kgem->next_request;
3429 list_for_each_entry_safe(bo, next, &rq->buffers, request) {
3439 assert(RQ(bo->rq) == rq || (RQ(bo->proxy->rq) == rq));
3465 if (rq == &kgem->static_request) {
3471 set_domain.handle = rq->bo->handle;
3479 while (!list_is_empty(&rq->buffers)) {
3480 bo = list_first_entry(&rq->buffers,
3484 assert(RQ(bo->rq) == rq);
3490 bo->rq = NULL;
3497 assert(list_is_empty(&rq->buffers));
3499 assert(rq->bo->map__gtt == NULL);
3500 assert(rq->bo->map__wc == NULL);
3501 assert(rq->bo->map__cpu == NULL);
3502 gem_close(kgem->fd, rq->bo->handle);
3505 assert(rq != (struct kgem_request *)kgem);
3506 assert(rq->ring < ARRAY_SIZE(kgem->requests));
3507 assert(rq->bo);
3508 list_add_tail(&rq->list, &kgem->requests[rq->ring]);
3511 if (kgem->fence[rq->ring] == NULL &&
3512 __kgem_busy(kgem, rq->bo->handle))
3513 kgem->fence[rq->ring] = rq;
3594 assert(bo->base.rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
3636 shrink->rq = bo->base.rq;
3643 bo->base.rq = NULL;
3684 shrink->rq = bo->base.rq;
3691 bo->base.rq = NULL;
3725 struct kgem_request *rq;
3727 rq = list_first_entry(&kgem->requests[n],
3730 assert(rq->ring == n);
3731 while (!list_is_empty(&rq->buffers)) {
3734 bo = list_first_entry(&rq->buffers,
3745 if (--rq->bo->refcnt == 0)
3746 kgem_bo_free(kgem, rq->bo);
3748 __kgem_request_free(rq);
3764 assert(bo->rq == NULL);
3853 struct kgem_request *rq = kgem->next_request;
3855 while (!list_is_empty(&rq->buffers)) {
3857 list_first_entry(&rq->buffers,
3862 assert(RQ(bo->rq) == rq);
3872 bo->rq = (void *)kgem;
3877 if (bo->refcnt || bo->rq)
3883 if (rq != &kgem->static_request) {
3884 list_init(&rq->list);
3885 __kgem_request_free(rq);
3933 if (bo->rq) {
3934 assert(RQ(bo->rq)->bo == bo);
3938 __kgem_retire_rq(kgem, RQ(bo->rq));
3939 assert(bo->rq == NULL);
4001 assert(bo->rq == NULL);
4120 struct kgem_request *rq;
4151 rq = kgem->next_request;
4152 assert(rq->bo == NULL);
4154 rq->bo = kgem_create_batch(kgem);
4155 if (rq->bo) {
4158 assert(!rq->bo->needs_flush);
4161 kgem->exec[i].handle = rq->bo->handle;
4165 kgem->exec[i].offset = rq->bo->presumed_offset;
4171 rq->bo->exec = &kgem->exec[i];
4172 rq->bo->rq = MAKE_REQUEST(rq, kgem->ring); /* useful sanity check */
4173 list_add(&rq->bo->request, &rq->buffers);
4174 rq->ring = kgem->ring == KGEM_BLT;
4270 set_domain.handle = rq->bo->handle;
4278 if (gem_read(kgem->fd, rq->bo->handle, kgem->batch, 0, batch_end*sizeof(uint32_t)) == 0)
4391 struct kgem_request *rq = __kgem_freed_request;
4392 __kgem_freed_request = *(struct kgem_request **)rq;
4393 free(rq);
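
The matches at 4391–4393 are the teardown counterpart of the request cache sketched after 1025–1054: cached objects are popped one by one and finally handed back to malloc. Continuing that earlier sketch, with the same hypothetical names:

void request_cache_drain(void)
{
	while (freed_requests != NULL) {
		struct request *rq = freed_requests;

		/* Pop the next cached object via the link stored in its own storage. */
		freed_requests = *(struct request **)rq;
		free(rq);
	}
}
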
4523 struct kgem_request *rq;
4525 rq = list_last_entry(&kgem->requests[n],
4530 assert(rq->ring == n);
4531 assert(rq->bo);
4532 assert(RQ(rq->bo->rq) == rq);
4533 kgem_bo_wait(kgem, rq->bo);
4614 if (RQ(bo->rq) == (void *)kgem) {
4674 assert(bo->rq == NULL);
4721 assert(!!bo->rq == !!use_active);
5268 bo->rq = (void *)kgem;
5427 if (flags & CREATE_INACTIVE && bo->rq) {
5467 if (flags & CREATE_INACTIVE && bo->rq)
5633 assert(bo->rq == NULL);
6167 assert(bo->rq);
6176 __FUNCTION__, bo->handle, bo->rq != NULL));
6191 if (bo->rq)
6223 if (bo->rq == NULL || RQ_RING(bo->rq) == kgem->ring)
6335 busy &= bo->rq != NULL;
6457 if (bo->rq)
6509 busy &= bo->rq != NULL;
6639 bo->rq = MAKE_REQUEST(kgem->next_request,
6655 assert(bo->rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
6656 assert(RQ_RING(bo->rq) == kgem->ring);
6722 bo->rq = MAKE_REQUEST(kgem->next_request,
6738 assert(bo->rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
6739 assert(RQ_RING(bo->rq) == kgem->ring);
6804 assert(bo->rq == NULL);
7151 if (bo->rq == NULL && (kgem->has_llc || bo->snoop) && !write)
7262 /* Proxies are only tracked for busyness on the current rq */
7264 assert(RQ(target->rq) == kgem->next_request);
7267 bo->rq = target->rq;
7322 if (old->rq)
7590 (bo->base.rq == NULL ||
7721 assert(old->rq == NULL);
7997 assert(_bo->rq == NULL);