    Searched refs:timeline (Results 1 - 25 of 26) sorted by relevancy

  /src/sys/external/bsd/drm2/dist/drm/i915/gt/selftests/
mock_timeline.c 16 void mock_timeline_init(struct intel_timeline *timeline, u64 context)
18 timeline->gt = NULL;
19 timeline->fence_context = context;
21 mutex_init(&timeline->mutex);
23 INIT_ACTIVE_FENCE(&timeline->last_request);
24 INIT_LIST_HEAD(&timeline->requests);
26 i915_syncmap_init(&timeline->sync);
28 INIT_LIST_HEAD(&timeline->link);
31 void mock_timeline_fini(struct intel_timeline *timeline)
33 i915_syncmap_free(&timeline->sync);
    [all...]
mock_timeline.h 16 void mock_timeline_init(struct intel_timeline *timeline, u64 context);
17 void mock_timeline_fini(struct intel_timeline *timeline);
  /src/sys/external/bsd/drm2/dist/drm/i915/gt/
intel_timeline.h 45 intel_timeline_get(struct intel_timeline *timeline)
47 kref_get(&timeline->kref);
48 return timeline;
52 static inline void intel_timeline_put(struct intel_timeline *timeline)
54 kref_put(&timeline->kref, __intel_timeline_free);
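
The intel_timeline.h hits above show that timeline lifetime is reference counted: intel_timeline_get() takes another reference on the embedded kref and returns the pointer, while intel_timeline_put() drops it and frees the timeline through __intel_timeline_free on the last reference. The following is a minimal user-space sketch of that get/put pairing, assuming nothing beyond C11; sketch_timeline and its helpers are invented names, and the atomic counter merely stands in for the kernel's kref.

/*
 * Sketch of the reference-counted lifetime shown in intel_timeline.h:
 * get bumps the count and hands back the pointer, put drops the count
 * and frees the object on the final reference.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct sketch_timeline {
    atomic_uint refcount;
    unsigned long long fence_context;
};

static struct sketch_timeline *sketch_timeline_get(struct sketch_timeline *tl)
{
    /* Caller must already hold a reference; just bump the count. */
    atomic_fetch_add(&tl->refcount, 1);
    return tl;
}

static void sketch_timeline_put(struct sketch_timeline *tl)
{
    /* Free on the 1 -> 0 transition, mirroring kref_put()'s release callback. */
    if (atomic_fetch_sub(&tl->refcount, 1) == 1) {
        printf("freeing timeline %llu\n", tl->fence_context);
        free(tl);
    }
}

int main(void)
{
    struct sketch_timeline *tl = calloc(1, sizeof(*tl));

    atomic_init(&tl->refcount, 1);  /* creation owns the first reference */
    tl->fence_context = 42;

    sketch_timeline_get(tl);        /* e.g. a context taking a shared reference */
    sketch_timeline_put(tl);        /* ...and dropping it again */
    sketch_timeline_put(tl);        /* final put frees the timeline */
    return 0;
}
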
intel_timeline.c 56 hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline)
58 struct intel_gt_timelines *gt = &timeline->gt->timelines;
77 vma = __hwsp_alloc(timeline->gt);
84 hwsp->gt = timeline->gt;
209 int intel_timeline_init(struct intel_timeline *timeline,
215 kref_init(&timeline->kref);
216 atomic_set(&timeline->pin_count, 0);
218 timeline->gt = gt;
220 timeline->has_initial_breadcrumb = !hwsp;
221 timeline->hwsp_cacheline = NULL;
297 struct intel_timeline *timeline; local in function:intel_timeline_create
564 struct intel_timeline *timeline = local in function:__intel_timeline_free
    [all...]
intel_context.h 25 ce__->timeline->fence_context, \
109 lockdep_assert_held(&ce->timeline->mutex);
116 lockdep_assert_held(&ce->timeline->mutex);
122 lockdep_assert_held(&ce->timeline->mutex);
141 __acquires(&ce->timeline->mutex)
143 struct intel_timeline *tl = ce->timeline;
intel_context.c 235 intel_timeline_unpin(ce->timeline);
254 err = intel_timeline_pin(ce->timeline);
268 intel_timeline_unpin(ce->timeline);
303 if (ce->timeline)
304 intel_timeline_put(ce->timeline);
339 intel_timeline_enter(ce->timeline);
344 intel_timeline_exit(ce->timeline);
351 struct intel_timeline *tl = ce->timeline;
357 if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
    [all...]
mock_engine.c 145 mock_timeline_unpin(ce->timeline);
158 GEM_BUG_ON(ce->timeline);
159 ce->timeline = intel_timeline_create(ce->engine->gt, NULL);
160 if (IS_ERR(ce->timeline)) {
162 return PTR_ERR(ce->timeline);
165 mock_timeline_pin(ce->timeline);
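
The mock_engine.c hit returns PTR_ERR(ce->timeline) when intel_timeline_create() fails, following the kernel convention of folding a small negative errno into the returned pointer so a single value carries either a valid object or an error. The sketch below reimplements that idiom just enough to compile on its own; the real macros live in <linux/err.h>, and sketch_timeline_create() is a hypothetical constructor used only for illustration.

/*
 * Standalone sketch of the IS_ERR()/PTR_ERR() idiom: the top MAX_ERRNO
 * addresses are never valid objects, so an errno can live in the pointer.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
    return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
    return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical constructor in the style of intel_timeline_create(). */
static void *sketch_timeline_create(int fail)
{
    if (fail)
        return ERR_PTR(-ENOMEM);
    return malloc(16);
}

int main(void)
{
    void *tl = sketch_timeline_create(1);

    if (IS_ERR(tl)) {
        printf("create failed: %ld\n", PTR_ERR(tl));
        return 1;
    }
    free(tl);
    return 0;
}
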
intel_context_types.h 58 struct intel_timeline *timeline; member in struct:intel_context
73 unsigned int active_count; /* protected by timeline->mutex */
intel_engine_heartbeat.c 124 mutex_lock(&ce->timeline->mutex);
140 mutex_unlock(&ce->timeline->mutex);
204 if (mutex_lock_interruptible(&ce->timeline->mutex))
222 mutex_unlock(&ce->timeline->mutex);
intel_engine_pm.c 78 mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
86 mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
125 * engine->wakeref.counter or our timeline->active_count.
140 /* Let new submissions commence (and maybe retire this timeline) */
164 * Note, we do this without taking the timeline->mutex. We cannot
166 * already underneath the timeline->mutex. Instead we rely on the
170 * the context, as they assume protection by the timeline->mutex.
192 GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);
218 __queue_and_release_pm(rq, ce->timeline, engine);
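
The intel_engine_pm.c comments above describe parking the engine without taking timeline->mutex: instead, correctness rests on counters such as engine->wakeref.counter and timeline->active_count, which intel_timeline_enter()/intel_timeline_exit() (see the intel_context.c hits) raise and drop. Below is a rough user-space model of that count-instead-of-lock idea, assuming only C11 atomics; the names and the can_park() helper are illustrative, not the driver's logic.

/*
 * Sketch: an atomic active count gates power-down instead of a mutex.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int active_count;     /* stands in for timeline->active_count */

static void timeline_enter(void)    /* a new submission starts on the timeline */
{
    atomic_fetch_add(&active_count, 1);
}

static void timeline_exit(void)     /* the submission retires */
{
    int old = atomic_fetch_sub(&active_count, 1);

    assert(old > 0);                /* mirrors GEM_BUG_ON(active_count < 0) */
}

static int can_park(void)
{
    /* The engine may power down only once nothing is active. */
    return atomic_load(&active_count) == 0;
}

int main(void)
{
    timeline_enter();
    printf("parkable: %d\n", can_park());
    timeline_exit();
    printf("parkable: %d\n", can_park());
    return 0;
}
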
intel_ring_submission.c 1331 ce->timeline = intel_timeline_get(engine->legacy.timeline);
1817 intel_timeline_unpin(engine->legacy.timeline);
1818 intel_timeline_put(engine->legacy.timeline);
1859 * Using a global execution timeline; the previous final breadcrumb is
1963 struct intel_timeline *timeline; local in function:intel_ring_submission_setup
1987 timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
1988 if (IS_ERR(timeline)) {
1989 err = PTR_ERR(timeline);
1992 GEM_BUG_ON(timeline->has_initial_breadcrumb);
    [all...]
intel_engine_cs.c 623 struct intel_timeline timeline; member in struct:measure_breadcrumb
639 if (intel_timeline_init(&frame->timeline,
644 mutex_lock(&frame->timeline.mutex);
654 rcu_assign_pointer(frame->rq.timeline, &frame->timeline);
656 dw = intel_timeline_pin(&frame->timeline);
666 intel_timeline_unpin(&frame->timeline);
669 mutex_unlock(&frame->timeline.mutex);
670 intel_timeline_fini(&frame->timeline);
723 lockdep_set_class(&ce->timeline->mutex, &kernel);
    [all...]
selftest_rc6.c 101 *cs++ = ce->timeline->hwsp_offset + 8;
intel_lrc.c 195 * (each request in the timeline waits for the completion fence of
1226 * requests so that inter-timeline dependencies (i.e other timelines)
1237 head = active_request(ce->timeline, rq)->head;
1327 if (list_is_last(&rq->link, &ce->timeline->requests) &&
1329 intel_engine_add_retire(engine, ce->timeline);
1485 ce->timeline->fence_context,
1501 ce->timeline->fence_context,
1509 ce->timeline->fence_context,
1517 ce->timeline->fence_context,
2689 cap->rq = active_request(cap->rq->context->timeline, cap->rq);
    [all...]
intel_breadcrumbs.c 207 add_retire(b, ce->timeline);
318 * hasn't even started). We could walk the timeline->requests,
intel_engine_types.h 336 struct intel_timeline *timeline; member in struct:intel_engine_cs::__anonee01bb2a0408
selftest_engine_heartbeat.c 38 return timeline_sync(engine->kernel_context->timeline);
  /src/sys/external/bsd/drm2/dist/drm/i915/
i915_active.c 40 u64 timeline; member in struct:active_node
142 if (a->timeline < b->timeline)
144 if (a->timeline > b->timeline)
159 if (a->timeline < *k)
161 if (a->timeline > *k)
279 * We track the most recently used timeline to skip a rbtree search
283 * current timeline.
286 if (node && node->timeline == idx)
    [all...]
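
The i915_active.c hits show per-timeline nodes keyed by a u64 timeline id, ordered comparisons on that key, and a cached most-recently-used node so that a repeat lookup on the current timeline skips the tree search. The sketch below reproduces that check-the-cache-then-search shape; it substitutes a sorted array and bsearch() for the driver's rbtree purely to stay self-contained, and every name in it is illustrative.

/*
 * Sketch: check a one-entry cache of the last-used node before falling
 * back to an ordered search by timeline id.
 */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

struct sketch_node {
    uint64_t timeline;      /* lookup key, as in struct active_node */
    int payload;
};

static struct sketch_node *cache;   /* most recently used node */

static int cmp_key(const void *key, const void *elem)
{
    const uint64_t *k = key;
    const struct sketch_node *n = elem;

    if (*k < n->timeline)
        return -1;
    if (*k > n->timeline)
        return 1;
    return 0;
}

static struct sketch_node *lookup(struct sketch_node *nodes, size_t count,
                                  uint64_t idx)
{
    struct sketch_node *node;

    /* Fast path: a repeat access to the current timeline skips the search. */
    if (cache && cache->timeline == idx)
        return cache;

    node = bsearch(&idx, nodes, count, sizeof(*nodes), cmp_key);
    if (node)
        cache = node;
    return node;
}

int main(void)
{
    struct sketch_node nodes[] = {
        { .timeline = 1, .payload = 10 },
        { .timeline = 4, .payload = 40 },
        { .timeline = 9, .payload = 90 },
    };
    struct sketch_node *n;

    n = lookup(nodes, 3, 4);    /* ordered search, result cached */
    n = lookup(nodes, 3, 4);    /* served from the cache */
    printf("payload %d\n", n ? n->payload : -1);
    return 0;
}
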
i915_request.h 175 struct intel_timeline __rcu *timeline; member in struct:i915_request
187 * We pin the timeline->mutex while constructing the request to
189 * The timeline->mutex must be held to ensure that only this caller
190 * can use the ring and manipulate the associated timeline during
233 * the HW status page (or our timeline's local equivalent). The full
234 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
239 * If we need to access the timeline's seqno for this request in
243 * inside the timeline's HWSP vma, but it is only valid while this
244 * request has not completed and guarded by the timeline mutex.
285 /** timeline->request entry for this request */
    [all...]
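
The i915_request.h comments explain that timeline->mutex is pinned while a request is being constructed, so only that caller can use the ring and manipulate the timeline until the request is published. The toy pthread model below sketches that build-and-publish-under-the-lock rule; the struct and function names are invented, and a fixed array stands in for the timeline->requests list.

/*
 * Sketch: the whole construction, from reserving a seqno to appending the
 * request to the timeline, happens under the timeline's mutex.
 */
#include <pthread.h>
#include <stdio.h>

#define MAX_REQUESTS 8

struct sketch_tl {
    pthread_mutex_t mutex;
    unsigned int seqno[MAX_REQUESTS];   /* stands in for timeline->requests */
    unsigned int count;
    unsigned int next_seqno;
};

static int sketch_add_request(struct sketch_tl *tl)
{
    unsigned int seqno;

    pthread_mutex_lock(&tl->mutex);     /* held for the whole construction */
    if (tl->count == MAX_REQUESTS) {
        pthread_mutex_unlock(&tl->mutex);
        return -1;
    }
    seqno = ++tl->next_seqno;           /* emit commands, pick a seqno... */
    tl->seqno[tl->count++] = seqno;     /* ...then publish on the timeline */
    pthread_mutex_unlock(&tl->mutex);
    return (int)seqno;
}

int main(void)
{
    struct sketch_tl tl = { .mutex = PTHREAD_MUTEX_INITIALIZER };

    printf("request %d added\n", sketch_add_request(&tl));
    printf("request %d added\n", sketch_add_request(&tl));
    return 0;
}
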
i915_request.c 73 * The timeline struct (as part of the ppgtt underneath a context)
218 * Virtual engines complicate acquiring the engine timeline lock,
484 * to the engine timeline (__i915_request_submit()). The waiters
628 struct intel_timeline *tl = ce->timeline;
699 RCU_INIT_POINTER(rq->timeline, tl);
801 rcu_access_pointer(signal->timeline));
808 &rcu_dereference(signal->timeline)->requests)) {
812 * Peek at the request before us in the timeline. That
815 * still part of the signaler's timeline.
885 * the timeline HWSP upon wrapping, so that everyone listening
1227 struct intel_timeline *timeline = i915_request_timeline(rq); local in function:__i915_request_add_to_timeline
    [all...]
  /src/sys/external/bsd/drm2/dist/drm/i915/gem/
i915_gem_context_types.h 82 struct intel_timeline *timeline; member in struct:i915_gem_context
i915_gem_context.c 242 GEM_BUG_ON(ce->timeline);
243 if (ctx->timeline)
244 ce->timeline = intel_timeline_get(ctx->timeline);
321 if (ctx->timeline)
322 intel_timeline_put(ctx->timeline);
448 if (!ce->timeline)
451 mutex_lock(&ce->timeline->mutex);
452 list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
461 mutex_unlock(&ce->timeline->mutex);
752 struct intel_timeline *timeline; local in function:i915_gem_create_context
    [all...]
  /src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/
amdgpu_trace.h 171 __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
181 __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
187 TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
188 __entry->sched_job_id, __get_str(timeline), __entry->context,
197 __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
206 __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
212 TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
213 __entry->sched_job_id, __get_str(timeline), __entry->context,
  /src/sys/external/bsd/drm2/dist/drm/i915/selftests/
i915_active.c 258 "\ttimeline: %llx\n", it->timeline);
  /src/sys/external/bsd/drm2/dist/drm/
drm_syncobj.c 238 * drm_syncobj_add_point - add new timeline point to the syncobj
239 * @syncobj: sync object to add timeline point do
244 * Add the chain node as new timeline point to the syncobj.
259 /* You are adding an unordered point to the timeline, which could cause the payload returned from query_ioctl to be 0! */
261 DRM_ERROR("You are adding an unorder point to timeline!\n");
328 * @point: timeline point
1276 struct drm_syncobj **syncobjs, bool timeline)
1281 if (!timeline) {
1611 /* It is most likely that timeline has
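
The drm_syncobj.c documentation above adds chain nodes as timeline points and warns that an unordered point can make a later query return a payload of 0. The sketch below models only that ordering rule for a user-space timeline: each new point must carry a strictly larger sequence number. Unlike the kernel, which logs a DRM_ERROR and continues, this illustrative model rejects the out-of-order point; none of the names here belong to the DRM API.

/*
 * Sketch: timeline points must be added in strictly increasing order.
 */
#include <inttypes.h>
#include <stdio.h>

struct sketch_timeline_obj {
    uint64_t last_point;    /* highest point added so far, i.e. the payload */
};

static int sketch_add_point(struct sketch_timeline_obj *obj, uint64_t point)
{
    if (point <= obj->last_point) {
        /* Mirrors the DRM_ERROR about adding an unordered point. */
        fprintf(stderr, "unordered point %" PRIu64 " (timeline already at %" PRIu64 ")\n",
                point, obj->last_point);
        return -1;
    }
    obj->last_point = point;
    return 0;
}

int main(void)
{
    struct sketch_timeline_obj obj = { .last_point = 0 };

    sketch_add_point(&obj, 1);
    sketch_add_point(&obj, 2);
    sketch_add_point(&obj, 2);  /* rejected: not strictly increasing */
    printf("timeline payload: %" PRIu64 "\n", obj.last_point);
    return 0;
}
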
