      1 /*	$NetBSD: intel_ring.h,v 1.2 2021/12/18 23:45:30 riastradh Exp $	*/
      2 
      3 /*
      4  * SPDX-License-Identifier: MIT
      5  *
 * Copyright © 2019 Intel Corporation
      7  */
      8 
      9 #ifndef INTEL_RING_H
     10 #define INTEL_RING_H
     11 
     12 #include "i915_gem.h" /* GEM_BUG_ON */
     13 #include "i915_request.h"
     14 #include "intel_ring_types.h"
     15 
     16 struct intel_engine_cs;
     17 
     18 struct intel_ring *
     19 intel_engine_create_ring(struct intel_engine_cs *engine, int size);
     20 
     21 u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);
     22 int intel_ring_cacheline_align(struct i915_request *rq);
     23 
     24 unsigned int intel_ring_update_space(struct intel_ring *ring);
     25 
     26 int intel_ring_pin(struct intel_ring *ring);
     27 void intel_ring_unpin(struct intel_ring *ring);
     28 void intel_ring_reset(struct intel_ring *ring, u32 tail);
     29 
     30 void intel_ring_free(struct kref *ref);
     31 
     32 static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
     33 {
     34 	kref_get(&ring->ref);
     35 	return ring;
     36 }
     37 
     38 static inline void intel_ring_put(struct intel_ring *ring)
     39 {
     40 	kref_put(&ring->ref, intel_ring_free);
     41 }
     42 
     43 static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
     44 {
     45 	/* Dummy function.
     46 	 *
     47 	 * This serves as a placeholder in the code so that the reader
     48 	 * can compare against the preceding intel_ring_begin() and
     49 	 * check that the number of dwords emitted matches the space
     50 	 * reserved for the command packet (i.e. the value passed to
     51 	 * intel_ring_begin()).
     52 	 */
     53 	GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
     54 }
     55 
     56 static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
     57 {
     58 	return pos & (ring->size - 1);
     59 }
     60 
     61 static inline int intel_ring_direction(const struct intel_ring *ring,
     62 				       u32 next, u32 prev)
     63 {
     64 	typecheck(typeof(ring->size), next);
     65 	typecheck(typeof(ring->size), prev);
     66 	return (next - prev) << ring->wrap;
     67 }
     68 
     69 static inline bool
     70 intel_ring_offset_valid(const struct intel_ring *ring,
     71 			unsigned int pos)
     72 {
     73 	if (pos & -ring->size) /* must be strictly within the ring */
     74 		return false;
     75 
     76 	if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
     77 		return false;
     78 
     79 	return true;
     80 }
     81 
     82 static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
     83 {
     84 	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
     85 	u32 offset = addr - rq->ring->vaddr;
     86 	GEM_BUG_ON(offset > rq->ring->size);
     87 	return intel_ring_wrap(rq->ring, offset);
     88 }
     89 
     90 static inline void
     91 assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
     92 {
     93 	GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
     94 
     95 	/*
     96 	 * "Ring Buffer Use"
     97 	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
     98 	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
     99 	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
    100 	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
    101 	 * same cacheline, the Head Pointer must not be greater than the Tail
    102 	 * Pointer."
    103 	 *
    104 	 * We use ring->head as the last known location of the actual RING_HEAD,
    105 	 * it may have advanced but in the worst case it is equally the same
    106 	 * as ring->head and so we should never program RING_TAIL to advance
    107 	 * into the same cacheline as ring->head.
    108 	 */
    109 #define cacheline(a) round_down(a, CACHELINE_BYTES)
    110 	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
    111 		   tail < ring->head);
    112 #undef cacheline
    113 }
    114 
    115 static inline unsigned int
    116 intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
    117 {
    118 	/* Whilst writes to the tail are strictly order, there is no
    119 	 * serialisation between readers and the writers. The tail may be
    120 	 * read by i915_request_retire() just as it is being updated
    121 	 * by execlists, as although the breadcrumb is complete, the context
    122 	 * switch hasn't been seen.
    123 	 */
    124 	assert_ring_tail_valid(ring, tail);
    125 	ring->tail = tail;
    126 	return tail;
    127 }
    128 
    129 static inline unsigned int
    130 __intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
    131 {
    132 	/*
    133 	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
    134 	 * same cacheline, the Head Pointer must not be greater than the Tail
    135 	 * Pointer."
    136 	 */
    137 	GEM_BUG_ON(!is_power_of_2(size));
    138 	return (head - tail - CACHELINE_BYTES) & (size - 1);
    139 }
    140 
    141 #endif /* INTEL_RING_H */
    142