/*	$NetBSD: intel_ring.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_ring.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $");

#include "gem/i915_gem_object.h"
#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_engine.h"
#include "intel_ring.h"
#include "intel_timeline.h"

unsigned int intel_ring_update_space(struct intel_ring *ring)
{
	unsigned int space;

	space = __intel_ring_space(ring->head, ring->emit, ring->size);

	ring->space = space;
	return space;
}

int intel_ring_pin(struct intel_ring *ring)
{
	struct i915_vma *vma = ring->vma;
	unsigned int flags;
	void *addr;
	int ret;

	if (atomic_fetch_inc(&ring->pin_count))
		return 0;

	flags = PIN_GLOBAL;

	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);

	if (vma->obj->stolen)
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;

	ret = i915_vma_pin(vma, 0, 0, flags);
	if (unlikely(ret))
		goto err_unpin;

	if (i915_vma_is_map_and_fenceable(vma))
		addr = (void __force *)i915_vma_pin_iomap(vma);
	else
		addr = i915_gem_object_pin_map(vma->obj,
				i915_coherent_map_type(vma->vm->i915));
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err_ring;
	}

	i915_vma_make_unshrinkable(vma);

	/* Discard any unused bytes beyond that submitted to hw. */
	intel_ring_reset(ring, ring->emit);

	ring->vaddr = addr;
	return 0;

err_ring:
	i915_vma_unpin(vma);
err_unpin:
	atomic_dec(&ring->pin_count);
	return ret;
}
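
/*
 * Reset the ring's software bookkeeping: head, tail and emit are all set
 * to the same (wrapped) offset, so the ring is treated as empty from that
 * point, and the available space is recomputed.
 */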
void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
	tail = intel_ring_wrap(ring, tail);
	ring->tail = tail;
	ring->head = tail;
	ring->emit = tail;
	intel_ring_update_space(ring);
}

void intel_ring_unpin(struct intel_ring *ring)
{
	struct i915_vma *vma = ring->vma;

	if (!atomic_dec_and_test(&ring->pin_count))
		return;

	i915_vma_unset_ggtt_write(vma);
	if (i915_vma_is_map_and_fenceable(vma))
		i915_vma_unpin_iomap(vma);
	else
		i915_gem_object_unpin_map(vma->obj);

	i915_vma_make_purgeable(vma);
	i915_vma_unpin(vma);
}

static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
{
	struct i915_address_space *vm = &ggtt->vm;
	struct drm_i915_private *i915 = vm->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = ERR_PTR(-ENODEV);
	if (i915_ggtt_has_aperture(ggtt))
		obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Mark ring buffers as read-only from GPU side (so no stray overwrites)
	 * if supported by the platform's GGTT.
	 */
	if (vm->has_read_only)
		i915_gem_object_set_readonly(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}
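
/*
 * Allocate and initialise an intel_ring of the given power-of-two @size
 * for @engine: set up the refcount and size bookkeeping (including the
 * i830/i845 TAIL erratum workaround below) and create the backing VMA
 * via create_ring_vma().
 */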
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_ring *ring;
	struct i915_vma *vma;

	GEM_BUG_ON(!is_power_of_2(size));
	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return ERR_PTR(-ENOMEM);

	kref_init(&ring->ref);
	ring->size = size;
	ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);

	/*
	 * Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(i915) || IS_I845G(i915))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	intel_ring_update_space(ring);

	vma = create_ring_vma(engine->gt->ggtt, size);
	if (IS_ERR(vma)) {
		kfree(ring);
		return ERR_CAST(vma);
	}
	ring->vma = vma;

	return ring;
}

void intel_ring_free(struct kref *ref)
{
	struct intel_ring *ring = container_of(ref, typeof(*ring), ref);

	i915_vma_put(ring->vma);
	kfree(ring);
}

static noinline int
wait_for_space(struct intel_ring *ring,
	       struct intel_timeline *tl,
	       unsigned int bytes)
{
	struct i915_request *target;
	long timeout;

	if (intel_ring_update_space(ring) >= bytes)
		return 0;

	GEM_BUG_ON(list_empty(&tl->requests));
	list_for_each_entry(target, &tl->requests, link) {
		if (target->ring != ring)
			continue;

		/* Would completion of this request free enough space? */
		if (bytes <= __intel_ring_space(target->postfix,
						ring->emit, ring->size))
			break;
	}

	if (GEM_WARN_ON(&target->link == &tl->requests))
		return -ENOSPC;

	timeout = i915_request_wait(target,
				    I915_WAIT_INTERRUPTIBLE,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;

	i915_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
}
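
/*
 * Reserve @num_dwords dwords (plus the request's reserved space) in the
 * ring, wrapping past the end and waiting for older requests to retire if
 * required, and return a pointer at which the caller writes its commands
 * before handing them back with intel_ring_advance().  A typical caller,
 * sketched:
 *
 *	cs = intel_ring_begin(rq, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(rq, cs);
 */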
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
	struct intel_ring *ring = rq->ring;
	const unsigned int remain_usable = ring->effective_size - ring->emit;
	const unsigned int bytes = num_dwords * sizeof(u32);
	unsigned int need_wrap = 0;
	unsigned int total_bytes;
	u32 *cs;

	/* Packets must be qword aligned. */
	GEM_BUG_ON(num_dwords & 1);

	total_bytes = bytes + rq->reserved_space;
	GEM_BUG_ON(total_bytes > ring->effective_size);

	if (unlikely(total_bytes > remain_usable)) {
		const int remain_actual = ring->size - ring->emit;

		if (bytes > remain_usable) {
			/*
			 * Not enough space for the basic request. So need to
			 * flush out the remainder and then wait for
			 * base + reserved.
			 */
			total_bytes += remain_actual;
			need_wrap = remain_actual | 1;
		} else {
			/*
			 * The base request will fit but the reserved space
			 * falls off the end. So we don't need an immediate
			 * wrap and only need to effectively wait for the
			 * reserved size from the start of ringbuffer.
			 */
			total_bytes = rq->reserved_space + remain_actual;
		}
	}

	if (unlikely(total_bytes > ring->space)) {
		int ret;

		/*
		 * Space is reserved in the ringbuffer for finalising the
		 * request, as that cannot be allowed to fail. During request
		 * finalisation, reserved_space is set to 0 to stop the
		 * overallocation and the assumption is that then we never need
		 * to wait (which has the risk of failing with EINTR).
		 *
		 * See also i915_request_alloc() and i915_request_add().
		 */
		GEM_BUG_ON(!rq->reserved_space);

		ret = wait_for_space(ring,
				     i915_request_timeline(rq),
				     total_bytes);
		if (unlikely(ret))
			return ERR_PTR(ret);
	}

	if (unlikely(need_wrap)) {
		need_wrap &= ~1;
		GEM_BUG_ON(need_wrap > ring->space);
		GEM_BUG_ON(ring->emit + need_wrap > ring->size);
		GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));

		/* Fill the tail with MI_NOOP */
		memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
		ring->space -= need_wrap;
		ring->emit = 0;
	}

	GEM_BUG_ON(ring->emit > ring->size - bytes);
	GEM_BUG_ON(ring->space < bytes);
	cs = ring->vaddr + ring->emit;
	GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
	ring->emit += bytes;
	ring->space -= bytes;

	return cs;
}

/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct i915_request *rq)
{
	int num_dwords;
	void *cs;

	num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_DWORDS - num_dwords;
	GEM_BUG_ON(num_dwords & 1);

	cs = intel_ring_begin(rq, num_dwords);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
	intel_ring_advance(rq, cs + num_dwords);

	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
	return 0;
}