    Searched refs:seqno (Results 1 - 25 of 73) sorted by relevance


  /src/sys/external/bsd/drm2/dist/drm/i915/
i915_syncmap.h 36 int i915_syncmap_set(struct i915_syncmap **root, u64 id, u32 seqno);
37 bool i915_syncmap_is_later(struct i915_syncmap **root, u64 id, u32 seqno);
i915_syncmap.c 42 * context id to the last u32 fence seqno waited upon from that context.
51 * A leaf holds an array of u32 seqno, and has height 0. The bitmap field
52 * allows us to store whether a particular seqno is valid (i.e. allows us
84 * Following this header is an array of either seqno or child pointers:
86 * u32 seqno[KSYNCMAP];
149 * @seqno: the sequence number along the other timeline
156 * Returns true if the two timelines are already synchronised wrt to @seqno,
159 bool i915_syncmap_is_later(struct i915_syncmap **root, u64 id, u32 seqno)
200 return seqno_later(__sync_seqno(p)[idx], seqno);
219 static inline void __sync_set_seqno(struct i915_syncmap *p, u64 id, u32 seqno)
    [all...]
i915_request.h 57 rq__->fence.context, rq__->fence.seqno, \
239 * If we need to access the timeline's seqno for this request in
417 * has the associated breadcrumb value of rq->fence.seqno, when the HW
422 u32 seqno; local in function:hwsp_seqno
425 seqno = __hwsp_seqno(rq);
428 return seqno;
433 return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
513 return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
518 rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
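
The idiom behind these i915 hits is a wraparound-safe comparison: i915_seqno_passed() and seqno_later() subtract in unsigned 32-bit arithmetic and reinterpret the result as signed. A minimal standalone sketch, with C99 stdint types standing in for the kernel's u32/s32 and seqno_passed as our own name for it:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * True if seq1 is at or past seq2. Correct across the 2^32 wrap
     * as long as the two values are within 2^31 of each other.
     */
    static inline bool seqno_passed(uint32_t seq1, uint32_t seq2)
    {
            return (int32_t)(seq1 - seq2) >= 0;
    }

    /* seqno_passed(2, 0xfffffffeu) is true: 2 is four steps past the wrap. */
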
  /src/sys/external/bsd/drm2/dist/drm/i915/gt/
intel_timeline.h 58 u64 context, u32 seqno)
60 return i915_syncmap_set(&tl->sync, context, seqno);
66 return __intel_timeline_sync_set(tl, fence->context, fence->seqno);
70 u64 context, u32 seqno)
72 return i915_syncmap_is_later(&tl->sync, context, seqno);
78 return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno);
85 u32 *seqno);
selftest_timeline.c 169 u32 seqno; member in struct:__igt_sync
181 if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
182 pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
183 name, p->name, ctx, p->seqno, yesno(p->expected));
188 ret = __intel_timeline_sync_set(tl, ctx, p->seqno);
366 u32 seqno = prandom_u32_state(&prng); local in function:bench_sync
368 if (!__intel_timeline_sync_is_later(&tl, id, seqno))
369 __intel_timeline_sync_set(&tl, id, seqno);
380 /* Benchmark searching for a known context id and changing the seqno */
496 if (*tl->hwsp_seqno != tl->seqno) {
678 u32 seqno[2]; local in function:live_hwsp_wrap
    [all...]
intel_timeline_types.h 27 u32 seqno; member in struct:intel_timeline
76 * We track the most recent seqno that we wait on in every context so
intel_breadcrumbs.c 94 return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
101 i915_seqno_passed(rq->fence.seqno,
102 list_next_entry(rq, signal_link)->fence.seqno))
106 i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno,
107 rq->fence.seqno))
315 * We keep the seqno in retirement order, so we can break
332 if (i915_seqno_passed(rq->fence.seqno, it->fence.seqno))
390 (uint64_t)rq->fence.seqno,
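
The comment at line 315 and the check at line 332 keep the signal list sorted in retirement order under wraparound. A hedged sketch of that insertion, with a hypothetical node type in place of struct i915_request and its signal_link:

    #include <stddef.h>
    #include <stdint.h>

    struct node {                 /* illustrative stand-in */
            uint32_t seqno;
            struct node *next;
    };

    /* Walk past every entry the new seqno has already passed, then
     * link in before the first strictly newer one, so the list stays
     * oldest-to-newest even across a seqno wrap. */
    static void insert_in_retirement_order(struct node **head, struct node *rq)
    {
            struct node **pp = head;

            while (*pp != NULL && (int32_t)(rq->seqno - (*pp)->seqno) >= 0)
                    pp = &(*pp)->next;
            rq->next = *pp;
            *pp = rq;
    }
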
intel_timeline.c 397 GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);
399 return tl->seqno += 1 + tl->has_initial_breadcrumb;
404 tl->seqno -= 1 + tl->has_initial_breadcrumb;
410 u32 *seqno)
421 * we cannot wraparound our seqno value (the HW semaphore does
484 *seqno = timeline_advance(tl);
485 GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
499 u32 *seqno)
501 *seqno = timeline_advance(tl);
504 if (unlikely(!*seqno && tl->hwsp_cacheline)
    [all...]
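
Lines 397-404 show how a timeline hands out seqnos: a request that emits an initial breadcrumb consumes two numbers, so (seqno - 1, seqno) always belong to one request. A reduced sketch, with a two-field stand-in for struct intel_timeline:

    #include <stdbool.h>
    #include <stdint.h>

    struct timeline {
            uint32_t seqno;              /* last seqno handed out */
            bool has_initial_breadcrumb; /* request emits a leading breadcrumb */
    };

    /* Advance by 2 when a breadcrumb pair is emitted; seqno then keeps
     * even parity, which is what the GEM_BUG_ON at line 397 asserts. */
    static uint32_t timeline_advance(struct timeline *tl)
    {
            return tl->seqno += 1 + tl->has_initial_breadcrumb;
    }

    /* Undo an advance when request construction fails (cf. line 404). */
    static void timeline_rollback(struct timeline *tl)
    {
            tl->seqno -= 1 + tl->has_initial_breadcrumb;
    }
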
selftest_hangcheck.c 56 u32 *seqno; member in struct:hang
92 h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
210 *batch++ = rq->fence.seqno;
224 *batch++ = rq->fence.seqno;
237 *batch++ = rq->fence.seqno;
249 *batch++ = rq->fence.seqno;
289 return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
311 rq->fence.seqno),
314 rq->fence.seqno),
607 __func__, rq->fence.seqno, hws_seqno(&h, rq))
    [all...]
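
The hang selftest reserves one u32 slot per context in a single shared page (memset to 0xff at line 92); the batch writes rq->fence.seqno into its slot, and line 289 polls it to see how far the GPU got. A userland model of that read, with volatile standing in for READ_ONCE() and PAGE_SIZE assumed:

    #include <stdint.h>

    #define PAGE_SIZE 4096  /* assumed for the sketch */

    /* Fetch the last seqno the GPU wrote into this context's slot. */
    static inline uint32_t
    hws_read(const volatile uint32_t *page, uint64_t context)
    {
            return page[context % (PAGE_SIZE / sizeof(uint32_t))];
    }
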
  /src/sys/external/bsd/drm2/dist/drm/vmwgfx/
vmwgfx_marker.c 38 uint32_t seqno; member in struct:vmw_marker
61 uint32_t seqno)
68 marker->seqno = seqno;
95 if (signaled_seqno - marker->seqno > (1 << 30))
136 uint32_t seqno; local in function:vmw_wait_lag
142 seqno = atomic_read(&dev_priv->marker_seq);
146 seqno = marker->seqno;
150 ret = vmw_wait_seqno(dev_priv, false, seqno, true
    [all...]
vmwgfx_irq.c 156 static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
166 uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE); local in function:vmw_update_seqno
170 if (dev_priv->last_read_seqno != seqno) {
171 dev_priv->last_read_seqno = seqno;
172 vmw_marker_pull(&fifo_state->marker_queue, seqno);
178 uint32_t seqno)
185 if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
190 if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
194 vmw_fifo_idle(dev_priv, seqno))
198 * Then check if the seqno is higher than what we've actually
    [all...]
vmwgfx_fence.c 101 * a) When a new fence seqno has been submitted by the fifo code.
111 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
113 * the subsystem makes sure the fence goal seqno is updated.
115 * The fence goal seqno irq is on as long as there are unsignaled fence
152 u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE); local in function:vmw_fence_enable_signaling
153 if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
378 struct vmw_fence_obj *fence, u32 seqno,
384 fman->ctx, seqno);
424 * seqno if needed
508 uint32_t seqno, new_seqno; local in function:__vmw_fences_update
915 const u32 seqno = dev_priv->last_read_seqno; local in function:vmw_fence_obj_signaled_ioctl
    [all...]
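
Rather than a signed compare, vmwgfx tests a moving window: a fence counts as signaled once the last seqno read from the FIFO is less than VMW_FENCE_WRAP ahead of it (lines 153 and 185), and a marker is pruned as stale once the signaled seqno is more than 2^30 past it (vmwgfx_marker.c line 95). A restatement of both tests; the VMW_FENCE_WRAP value is an assumption here:

    #include <stdbool.h>
    #include <stdint.h>

    #define VMW_FENCE_WRAP (1u << 24)   /* window size, assumed */

    /* Unsigned subtraction keeps both tests correct across the wrap. */
    static inline bool vmw_seqno_signaled(uint32_t last_read, uint32_t seqno)
    {
            return last_read - seqno < VMW_FENCE_WRAP;
    }

    static inline bool vmw_marker_stale(uint32_t signaled, uint32_t seqno)
    {
            return signaled - seqno > (1u << 30);
    }
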
  /src/sys/external/bsd/drm2/dist/drm/virtio/
virtgpu_fence.c 52 if (WARN_ON_ONCE(fence->f.seqno == 0))
56 if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno)
63 snprintf(str, size, "%llu", f->seqno);
91 /* This only partially initializes the fence because the seqno is
108 fence->f.seqno = ++drv->sync_seq;
116 cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno);
129 if (last_seq < fence->f.seqno)
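
virtio-gpu sidesteps wraparound altogether: its fence seqno is 64-bit, handed out by a simple increment (line 108) and compared directly (line 56), with 0 reserved as "never emitted" (line 52). A reduced model of that scheme; the struct and helper names are illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    struct fence_driver {
            uint64_t sync_seq;  /* last seqno handed out, under a lock */
            uint64_t last_seq;  /* last seqno the device completed */
    };

    /* 64 bits never wrap at realistic rates, so ++ and >= suffice. */
    static inline uint64_t fence_alloc_seqno(struct fence_driver *drv)
    {
            return ++drv->sync_seq;
    }

    static inline bool
    fence_signaled(const struct fence_driver *drv, uint64_t seqno)
    {
            return drv->last_seq >= seqno;
    }
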
  /src/sys/external/bsd/drm2/linux/
linux_dma_fence_chain.c 44 * dma_fence_chain_init(chain, prev, fence, seqno)
51 struct dma_fence *fence, uint64_t seqno)
62 !__dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
65 seqno = MAX(prev->seqno, seqno);
69 chain->prev_seqno = prev->seqno;
73 context, seqno);
278 * dma_fence_chain_find_seqno(&fence, seqno)
    [all...]
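
Lines 62-69 keep a fence chain's timeline monotonic: if the caller's seqno is not later than the previous link's, it is bumped up to at least prev->seqno, and prev's seqno is recorded so dma_fence_chain_find_seqno() can walk back to it. The clamp itself, as a one-line sketch:

    #include <stdint.h>

    /* Never let a new link's seqno fall behind the previous link's. */
    static inline uint64_t chain_clamp_seqno(uint64_t prev_seqno, uint64_t seqno)
    {
            return seqno > prev_seqno ? seqno : prev_seqno;
    }
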
  /src/sys/external/bsd/drm2/dist/drm/i915/selftests/
igt_spinner.h 26 void *seqno; member in struct:igt_spinner
i915_syncmap.c 151 static int check_seqno(struct i915_syncmap *leaf, unsigned int idx, u32 seqno)
159 if (__sync_seqno(leaf)[idx] != seqno) {
160 pr_err("%s: seqno[%d], found %x, expected %x\n",
161 __func__, idx, __sync_seqno(leaf)[idx], seqno);
168 static int check_one(struct i915_syncmap **sync, u64 context, u32 seqno)
172 err = i915_syncmap_set(sync, context, seqno);
194 err = check_seqno((*sync), ilog2((*sync)->bitmap), seqno);
198 if (!i915_syncmap_is_later(sync, context, seqno)) {
199 pr_err("Lookup of first context=%llx/seqno=%x failed!\n",
200 context, seqno);
553 u32 seqno; local in function:igt_syncmap_random
    [all...]
igt_spinner.c 44 spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
158 *batch++ = rq->fence.seqno;
204 u32 *seqno = spin->seqno + seqno_offset(rq->fence.context); local in function:hws_seqno
206 return READ_ONCE(*seqno);
229 rq->fence.seqno),
232 rq->fence.seqno),
  /src/sys/external/bsd/drm2/dist/drm/radeon/
radeon_trace.h 129 TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
131 TP_ARGS(dev, ring, seqno),
136 __field(u32, seqno)
142 __entry->seqno = seqno;
145 TP_printk("dev=%u, ring=%d, seqno=%u",
146 __entry->dev, __entry->ring, __entry->seqno)
151 TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
153 TP_ARGS(dev, ring, seqno)
158 TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
    [all...]
  /src/sys/external/bsd/drm2/include/
radeon_trace.h 56 uint32_t seqno __unused)
62 int ring __unused, uint32_t seqno __unused)
68 uint32_t seqno __unused)
  /src/sys/external/bsd/drm2/dist/drm/scheduler/
gpu_scheduler_trace.h 85 __field(unsigned, seqno)
93 __entry->seqno = fence->seqno;
98 __entry->seqno)
  /src/sys/dev/
midisynvar.h 151 u_int seqno; /* allocation index (increases with time) */ member in struct:voice
180 u_int seqno; member in struct:midisyn
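
The midisyn header only says this seqno is an "allocation index (increases with time)". A hypothetical sketch of the usual use for such a stamp, picking the oldest voice to steal; the array layout and helper are illustrative, not midisyn's code:

    #include <stddef.h>

    struct voice_stub { unsigned int seqno; };  /* illustrative */

    /* The voice with the smallest stamp was allocated longest ago
     * (ignoring counter wrap, which a u_int makes very distant). */
    static size_t oldest_voice(const struct voice_stub *v, size_t n)
    {
            size_t best = 0;

            for (size_t i = 1; i < n; i++)
                    if (v[i].seqno < v[best].seqno)
                            best = i;
            return best;
    }
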
  /src/usr.sbin/btattach/
init_csr.c 52 uint16_t seqno; member in struct:bccmd::__anon8f7f6ed60108
94 cmd.message.seqno = htole16(0);
  /src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/
amdgpu_trace.h 173 __field(unsigned int, seqno)
183 __entry->seqno = job->base.s_fence->finished.seqno;
187 TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
189 __entry->seqno, __get_str(ring), __entry->num_ibs)
199 __field(unsigned int, seqno)
208 __entry->seqno = job->base.s_fence->finished.seqno;
212 TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
214 __entry->seqno, __get_str(ring), __entry->num_ibs
    [all...]
  /src/sys/net/
ppp-deflate.c 64 int seqno; member in struct:deflate_state
208 state->seqno = 0;
223 state->seqno = 0;
272 wptr[0] = state->seqno >> 8;
273 wptr[1] = state->seqno;
283 ++state->seqno;
425 state->seqno = 0;
441 state->seqno = 0;
488 if (seq != state->seqno) {
491 state->unit, seq, state->seqno);
    [all...]
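
ppp-deflate prepends a 16-bit big-endian sequence number to each packet (lines 272-273) and requires strict in-order delivery on receive (line 488), since a lost or reordered packet would desynchronize the shared deflate history. A userland restatement of the framing; the helper names are ours:

    #include <stdint.h>

    static void seqno_put(uint8_t *hdr, uint16_t seqno)
    {
            hdr[0] = seqno >> 8;    /* high byte first */
            hdr[1] = seqno & 0xff;
    }

    static uint16_t seqno_get(const uint8_t *hdr)
    {
            return (uint16_t)(hdr[0] << 8) | hdr[1];
    }

    /* Mirror of "if (seq != state->seqno)": any gap forces a reset. */
    static int seqno_check(uint16_t *expected, const uint8_t *hdr)
    {
            if (seqno_get(hdr) != *expected)
                    return -1;
            (*expected)++;
            return 0;
    }
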
  /src/usr.sbin/timed/timed/
measure.c 63 static n_short seqno = 0; variable in typeref:typename:n_short
135 oicp.icmp_seq = seqno;
212 || icp.icmp_seq < seqno
272 seqno += TRIALS; /* allocate our sequence numbers */
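
timed claims ICMP sequence numbers in batches: probes in one measurement start at the current seqno (line 135), replies with icmp_seq below that base are dropped as stale answers to an earlier run (line 212), and line 272 retires the whole range afterwards. A sketch of that bookkeeping; the TRIALS value is assumed:

    #include <stdint.h>

    #define TRIALS 5        /* probes per measurement, value assumed */

    static uint16_t seqno;  /* cf. the static n_short at line 63 */

    /* A reply below the current base answers an earlier measurement
     * (cf. line 212's "icp.icmp_seq < seqno" test). */
    static int reply_is_stale(uint16_t icmp_seq)
    {
            return icmp_seq < seqno;
    }

    /* Retire this run's range so later replies to it read as stale. */
    static void retire_seq_range(void)
    {
            seqno += TRIALS;
    }
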
