/*	$NetBSD: i915_gem.c,v 1.54.6.1 2020/01/17 21:47:32 ad Exp $	*/

/*
 * Copyright 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_gem.c,v 1.54.6.1 2020/01/17 21:47:32 ad Exp $");

#ifdef __NetBSD__
#if 0				/* XXX uvmhist option?  */
#include "opt_uvmhist.h"
#endif

#include <sys/types.h>
#include <sys/param.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_fault.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_pmap.h>
#include <uvm/uvm_prot.h>

#include <drm/bus_dma_hacks.h>
#endif

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/err.h>
#include <linux/bitops.h>
#include <linux/printk.h>
#include <asm/param.h>
#include <asm/page.h>
#include <asm/cpufeature.h>

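/*
 * Note: RQ_BUG_ON expands to nothing in this revision, so the
 * request-lifecycle assertions below (e.g. RQ_BUG_ON(obj->active))
 * are compiled out rather than checked.
 */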
#define RQ_BUG_ON(expr)

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
static void
i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);

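/*
 * CPU access is coherent when the platform shares its last-level
 * cache with the GPU, or when the object is in a snooped caching
 * mode; only uncached (I915_CACHE_NONE) objects on non-LLC parts
 * need explicit clflushes.
 */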
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
#ifdef __NetBSD__
	spin_lock(&error->reset_lock);
	DRM_SPIN_TIMED_WAIT_UNTIL(ret, &error->reset_queue, &error->reset_lock,
	    10*HZ, EXIT_COND);
	spin_unlock(&error->reset_lock);
#else
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
#endif
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_gtt *ggtt = &dev_priv->gtt;
	struct i915_vma *vma;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
		if (vma->pin_count)
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
		if (vma->pin_count)
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
#ifndef __NetBSD__
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
#endif
	char *vaddr = obj->phys_handle->vaddr;
#ifndef __NetBSD__
	struct sg_table *st;
	struct scatterlist *sg;
#endif
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

#ifdef __NetBSD__
		struct pglist pages = TAILQ_HEAD_INITIALIZER(pages);
		int ret;
		/* XXX errno NetBSD->Linux */
		ret = -uvm_obj_wirepages(obj->base.filp, i*PAGE_SIZE,
		    (i + 1)*PAGE_SIZE, &pages);
		if (ret)
			return ret;
		page = container_of(TAILQ_FIRST(&pages), struct page, p_vmp);
#else
		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);
#endif

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

#ifdef __NetBSD__
		uvm_obj_unwirepages(obj->base.filp, i*PAGE_SIZE,
		    (i + 1)*PAGE_SIZE);
#else
		page_cache_release(page);
#endif
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(obj->base.dev);

#ifdef __NetBSD__
	obj->pages = obj->phys_handle->dmah_map;
#else
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->pages = st;
#endif
	return 0;
}
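
/*
 * Note: for phys objects the two ports diverge in what obj->pages
 * holds: the NetBSD path reuses the bus_dma map created by
 * drm_pci_alloc (phys_handle->dmah_map), while the Linux path builds
 * a one-entry sg_table pointing at the contiguous allocation.
 */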

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		WARN_ON(ret != -EIO);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	if (obj->dirty) {
#ifndef __NetBSD__
		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
#endif
		const char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

#ifdef __NetBSD__
			struct pglist pages = TAILQ_HEAD_INITIALIZER(pages);
			/* XXX errno NetBSD->Linux */
			ret = -uvm_obj_wirepages(obj->base.filp,
			    i*PAGE_SIZE, (i + 1)*PAGE_SIZE, &pages);
			if (ret)
				continue;
			page = container_of(TAILQ_FIRST(&pages), struct page,
			    p_vmp);
#else
			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;
#endif

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
#ifdef __NetBSD__
			/* XXX mark page accessed */
			uvm_obj_unwirepages(obj->base.filp, i*PAGE_SIZE,
			    (i+1)*PAGE_SIZE);
#else
			if (obj->madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			page_cache_release(page);
#endif
			vaddr += PAGE_SIZE;
		}
		obj->dirty = 0;
	}

#ifdef __NetBSD__
	obj->pages = NULL;
#else
	sg_free_table(obj->pages);
	kfree(obj->pages);
#endif
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

static int
drop_pages(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma, *next;
	int ret;

	drm_gem_object_reference(&obj->base);
	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
		if (i915_vma_unbind(vma))
			break;

	ret = i915_gem_object_put_pages(obj);
	drm_gem_object_unreference(&obj->base);

	return ret;
}

int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	int ret;

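	/*
	 * Note: align is expected to be a power of two (it is handed
	 * to drm_pci_alloc below); the vaddr alignment test relies on
	 * that.
	 */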
	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
			return -EBUSY;

		return 0;
	}

	if (obj->madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = drop_pages(obj);
	if (ret)
		return ret;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_get_pages(obj);
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = (char *)obj->phys_handle->vaddr + args->offset;
	char __user *user_data = to_user_ptr(args->data_ptr);
	int ret = 0;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(dev);

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
#ifdef __NetBSD__		/* ALIGN means something else.  */
	args->pitch = round_up(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
#else
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
#endif
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

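/*
 * Note on the XOR below: gpu_offset ^ 64 swaps adjacent 64-byte
 * cachelines.  On parts that swizzle on physical address bit 17,
 * data in pages with bit 17 set is stored cacheline-swapped, so
 * these helpers copy to/from the swapped location; the callers use
 * them only for such pages.
 */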
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
#ifdef __NetBSD__		/* XXX ALIGN means something else.  */
		int cacheline_end = round_up(gpu_offset + 1, 64);
#else
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
#endif
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
#ifdef __NetBSD__		/* XXX ALIGN means something else.  */
		int cacheline_end = round_up(gpu_offset + 1, 64);
#else
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
#endif
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (!obj->base.filp)
		return -EINVAL;

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	return ret;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
#ifdef __NetBSD__		/* XXX atomic shmem fast path */
	return -EFAULT;
#else
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
#endif
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
#ifndef __NetBSD__		/* XXX */
	int prefaulted = 0;
#endif
	int needs_clflush = 0;
#ifndef __NetBSD__
	struct sg_page_iter sg_iter;
#endif

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	offset = args->offset;

#ifdef __NetBSD__
	while (0 < remain)
#else
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT)
#endif
	{
#ifdef __NetBSD__
		struct page *const page = i915_gem_object_get_page(obj,
		    atop(offset));
#else
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;
#endif

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);
#ifndef __NetBSD__
		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}
#endif
		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *gobj;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	gobj = drm_gem_object_lookup(dev, file, args->handle);
	if (gobj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	obj = to_intel_bo(gobj);

	/* Bounds check source.  */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
#ifdef __NetBSD__		/* XXX atomic shmem fast path */
	return -EFAULT;
#else
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
#endif
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	intel_fb_obj_invalidate(obj, ORIGIN_GTT);

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_flush;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_flush:
	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
#ifdef __NetBSD__
	return -EFAULT;
#else
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
#endif
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}
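
/*
 * Note: both pwrite helpers return -EFAULT (or -EINVAL for swizzled
 * pages in the fast path) to tell the caller to retry via the
 * non-atomic slow path with struct_mutex dropped.  On NetBSD the
 * atomic fast path is stubbed out above, so every page takes the
 * slow path.
 */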

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
#ifndef __NetBSD__
	struct sg_page_iter sg_iter;
	int flush_mask = boot_cpu_data.x86_clflush_size - 1;
#else
	int flush_mask = cpu_info_primary.ci_cflush_lsize - 1;
#endif

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

#ifdef __NetBSD__
	while (0 < remain)
#else
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT)
#endif
	{
#ifdef __NetBSD__
		struct page *const page = i915_gem_object_get_page(obj,
		    atop(offset));
#else
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;
#endif

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
		const int partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length) & flush_mask);

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				needs_clflush_after = true;
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);
	else
		obj->cache_dirty = true;

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *gobj;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

#ifndef __NetBSD__		/* XXX prefault */
	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}
#endif

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto put_rpm;

	gobj = drm_gem_object_lookup(dev, file, args->handle);
	if (gobj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	obj = to_intel_bo(gobj);

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
put_rpm:
	intel_runtime_pm_put(dev_priv);

	return ret;
}

int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		/*
		 * Check if GPU Reset is in progress - we need intel_ring_begin
		 * to work properly to reinit the hw state while the gpu is
		 * still marked as reset-in-progress. Handle this with a flag.
		 */
		if (!error->reload_in_reset)
			return -EAGAIN;
	}

	return 0;
}

#ifndef __NetBSD__
static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}
#endif

static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_engine_cs *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}
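
/*
 * Note: missed_irq_rings records rings whose completion interrupt is
 * known to be unreliable; when it is set, the waiters below fall back
 * to polling with a one-tick timeout instead of sleeping until the
 * next IRQ.
 */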

#ifndef __NetBSD__
static unsigned long local_clock_us(unsigned *cpu)
{
	unsigned long t;

	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt to the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

static bool busywait_stop(unsigned long timeout, unsigned cpu)
{
	unsigned this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}
#endif

static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
{
#ifndef __NetBSD__
	unsigned long timeout;
	unsigned cpu;
#endif

	/* When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quickly as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	 */

	if (req->ring->irq_refcount)
		return -EBUSY;

	/* Only spin if we know the GPU is processing this request */
	if (!i915_gem_request_started(req, true))
		return -EAGAIN;

#ifndef __NetBSD__		/* XXX No local clock in usec.  */
	timeout = local_clock_us(&cpu) + 5;
	while (!need_resched()) {
		if (i915_gem_request_completed(req, true))
			return 0;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout, cpu))
			break;

		cpu_relax_lowlatency();
	}
#endif

	if (i915_gem_request_completed(req, false))
		return 0;

	return -EAGAIN;
}

/**
 * __i915_wait_request - wait until execution of request has finished
 * @req: duh!
 * @reset_counter: reset sequence associated with the given request
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the request was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
int __i915_wait_request(struct drm_i915_gem_request *req,
			unsigned reset_counter,
			bool interruptible,
			s64 *timeout,
			struct intel_rps_client *rps)
{
	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
#ifdef __NetBSD__
	int state = 0;
	bool wedged;
#else
	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
#endif
	s64 before, now;
	int ret;

	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");

	if (list_empty(&req->list))
		return 0;

	if (i915_gem_request_completed(req, true))
		return 0;

#ifndef __NetBSD__
	timeout_expire = 0;
	if (timeout) {
		if (WARN_ON(*timeout < 0))
			return -EINVAL;

		if (*timeout == 0)
			return -ETIME;

		timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
	}
#endif

	if (INTEL_INFO(dev_priv)->gen >= 6)
		gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(req);
	before = ktime_get_raw_ns();

	/* Optimistic spin for the next jiffie before touching IRQs */
	ret = __i915_spin_request(req, state);
	if (ret == 0)
		goto out;

	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
		ret = -ENODEV;
		goto out;
	}

#ifdef __NetBSD__
# define EXIT_COND							      \
	((wedged = (reset_counter !=					      \
		atomic_read(&dev_priv->gpu_error.reset_counter))) ||	      \
	    i915_gem_request_completed(req, false))
	spin_lock(&dev_priv->irq_lock);
	if (timeout) {
		int ticks = missed_irq(dev_priv, ring) ? 1 :
		    nsecs_to_jiffies_timeout(*timeout);
		if (interruptible) {
			DRM_SPIN_TIMED_WAIT_UNTIL(ret, &ring->irq_queue,
			    &dev_priv->irq_lock, ticks, EXIT_COND);
		} else {
			DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret, &ring->irq_queue,
			    &dev_priv->irq_lock, ticks, EXIT_COND);
		}
		if (ret < 0)		/* Failure: return negative error as is.  */
			;
		else if (ret == 0)	/* Timed out: return -ETIME.  */
			ret = -ETIME;
		else			/* Succeeded (ret > 0): return 0.  */
			ret = 0;
	} else {
		if (interruptible) {
			DRM_SPIN_WAIT_UNTIL(ret, &ring->irq_queue,
			    &dev_priv->irq_lock, EXIT_COND);
		} else {
			DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &ring->irq_queue,
			    &dev_priv->irq_lock, EXIT_COND);
		}
		/* ret is negative on failure or zero on success.  */
	}
	spin_unlock(&dev_priv->irq_lock);
	if (wedged) {
		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
		if (ret == 0)
			ret = -EAGAIN;
	}
#else
	for (;;) {
		struct timer_list timer;

		prepare_to_wait(&ring->irq_queue, &wait, state);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truly gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		if (i915_gem_request_completed(req, false)) {
			ret = 0;
			break;
		}

		if (signal_pending_state(state, current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout && time_after_eq(jiffies, timeout_expire)) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			unsigned long expire;

			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
#endif
	if (!irq_test_in_progress)
		ring->irq_put(ring);

#ifndef __NetBSD__
	finish_wait(&ring->irq_queue, &wait);
#endif

out:
	now = ktime_get_raw_ns();
	trace_i915_gem_request_wait_end(req);

	if (timeout) {
		s64 tres = *timeout - (now - before);

		*timeout = tres < 0 ? 0 : tres;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
			*timeout = 0;
	}

	return ret;
}

int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file)
{
	struct drm_i915_private *dev_private __unused;
	struct drm_i915_file_private *file_priv;

	WARN_ON(!req || !file || req->file_priv);

	if (!req || !file)
		return -EINVAL;

	if (req->file_priv)
		return -EINVAL;

	dev_private = req->ring->dev->dev_private;
	file_priv = file->driver_priv;

	spin_lock(&file_priv->mm.lock);
	req->file_priv = file_priv;
	list_add_tail(&req->client_list, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);

#ifndef __NetBSD__
	req->pid = get_pid(task_pid(current));
#endif

	return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);

#ifndef __NetBSD__
	put_pid(request->pid);
	request->pid = NULL;
#endif
}

static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	trace_i915_gem_request_retire(request);

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	request->ringbuf->last_retired_head = request->postfix;

	list_del_init(&request->list);
	i915_gem_request_remove_from_client(request);

	i915_gem_request_unreference(request);
}

static void
__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->ring;
	struct drm_i915_gem_request *tmp;

	lockdep_assert_held(&engine->dev->struct_mutex);

	if (list_empty(&req->list))
		return;

	do {
		tmp = list_first_entry(&engine->request_list,
				       typeof(*tmp), list);

		i915_gem_request_retire(tmp);
	} while (tmp != req);

	WARN_ON(i915_verify_lists(engine->dev));
}

/**
 * Waits for a request to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_request(struct drm_i915_gem_request *req)
{
	struct drm_device *dev;
	struct drm_i915_private *dev_priv;
	bool interruptible;
	int ret;

	BUG_ON(req == NULL);

	dev = req->ring->dev;
	dev_priv = dev->dev_private;
	interruptible = dev_priv->mm.interruptible;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = __i915_wait_request(req,
				  atomic_read(&dev_priv->gpu_error.reset_counter),
				  interruptible, NULL, NULL);
	if (ret)
		return ret;

	__i915_gem_request_retire__upto(req);
	return 0;
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	int ret, i;

	if (!obj->active)
		return 0;

	if (readonly) {
		if (obj->last_write_req != NULL) {
			ret = i915_wait_request(obj->last_write_req);
			if (ret)
				return ret;

			i = obj->last_write_req->ring->id;
			if (obj->last_read_req[i] == obj->last_write_req)
				i915_gem_object_retire__read(obj, i);
			else
				i915_gem_object_retire__write(obj);
		}
	} else {
		for (i = 0; i < I915_NUM_RINGS; i++) {
			if (obj->last_read_req[i] == NULL)
				continue;

			ret = i915_wait_request(obj->last_read_req[i]);
			if (ret)
				return ret;

			i915_gem_object_retire__read(obj, i);
		}
		RQ_BUG_ON(obj->active);
	}

	return 0;
}

static void
i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
			       struct drm_i915_gem_request *req)
{
	int ring = req->ring->id;

	if (obj->last_read_req[ring] == req)
		i915_gem_object_retire__read(obj, ring);
	else if (obj->last_write_req == req)
		i915_gem_object_retire__write(obj);

	__i915_gem_request_retire__upto(req);
}

/* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
					    struct intel_rps_client *rps,
					    bool readonly)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *requests[I915_NUM_RINGS];
	unsigned reset_counter;
	int ret, i, n = 0;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(!dev_priv->mm.interruptible);

	if (!obj->active)
		return 0;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

	if (readonly) {
		struct drm_i915_gem_request *req;

		req = obj->last_write_req;
		if (req == NULL)
			return 0;

		requests[n++] = i915_gem_request_reference(req);
	} else {
		for (i = 0; i < I915_NUM_RINGS; i++) {
			struct drm_i915_gem_request *req;

			req = obj->last_read_req[i];
			if (req == NULL)
				continue;

			requests[n++] = i915_gem_request_reference(req);
		}
	}

	mutex_unlock(&dev->struct_mutex);
	for (i = 0; ret == 0 && i < n; i++)
		ret = __i915_wait_request(requests[i], reset_counter, true,
					  NULL, rps);
	mutex_lock(&dev->struct_mutex);

	for (i = 0; i < n; i++) {
		if (ret == 0)
			i915_gem_object_retire_request(obj, requests[i]);
		i915_gem_request_unreference(requests[i]);
	}

	return ret;
}
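
/*
 * Note: the references taken on the requests above, while
 * struct_mutex is still held, are what keep them valid across the
 * unlocked wait; retirement is performed only once the mutex has
 * been reacquired and only if the wait succeeded.
 */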

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;
	return &fpriv->rps;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	gobj = drm_gem_object_lookup(dev, file, args->handle);
	if (gobj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	obj = to_intel_bo(gobj);

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = i915_gem_object_wait_rendering__nonblocking(obj,
							  to_rps_client(file),
							  !write_domain);
	if (ret)
		goto unref;

	if (read_domains & I915_GEM_DOMAIN_GTT)
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
	else
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);

	if (write_domain != 0)
		intel_fb_obj_invalidate(obj,
					write_domain == I915_GEM_DOMAIN_GTT ?
					ORIGIN_GTT : ORIGIN_CPU);

unref:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *gobj;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	gobj = drm_gem_object_lookup(dev, file, args->handle);
	if (gobj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	obj = to_intel_bo(gobj);

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_display)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;
#ifdef __NetBSD__
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if ((dev_priv->quirks & QUIRK_NETBSD_VERSION_CALLED) == 0)
		args->flags = 0;
#endif

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !cpu_has_pat)
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->filp) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}

#ifdef __NetBSD__
	/* Acquire a reference for uvm_map to consume.  */
	uao_reference(obj->filp);
	addr = (*curproc->p_emul->e_vm_default_addr)(curproc,
	    (vaddr_t)curproc->p_vmspace->vm_daddr, args->size,
	    curproc->p_vmspace->vm_map.flags & VM_MAP_TOPDOWN);
	/* XXX errno NetBSD->Linux */
	ret = -uvm_map(&curproc->p_vmspace->vm_map, &addr, args->size,
	    obj->filp, args->offset, 0,
	    UVM_MAPFLAG((VM_PROT_READ | VM_PROT_WRITE),
		(VM_PROT_READ | VM_PROT_WRITE), UVM_INH_COPY, UVM_ADV_NORMAL,
		0));
	if (ret) {
		uao_detach(obj->filp);
		drm_gem_object_unreference_unlocked(obj);
		return ret;
	}
	drm_gem_object_unreference_unlocked(obj);
#else
	addr = vm_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		down_write(&mm->mmap_sem);
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);
	}
	drm_gem_object_unreference_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;
#endif

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
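
/*
 * Note: on NetBSD the GEM object's backing store is a uvm aobj
 * (obj->filp), so the CPU mmap is implemented by mapping that aobj
 * directly with uvm_map(); uvm_map() consumes the uao_reference()
 * taken above, which is why it is dropped with uao_detach() only on
 * failure.
 */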
2002
2003 #ifdef __NetBSD__ /* XXX gem gtt fault */
2004 static int i915_udv_fault(struct uvm_faultinfo *, vaddr_t,
2005 struct vm_page **, int, int, vm_prot_t, int, paddr_t);
2006
2007 int
2008 i915_gem_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
2009 int npages, int centeridx, vm_prot_t access_type, int flags)
2010 {
2011 struct uvm_object *uobj = ufi->entry->object.uvm_obj;
2012 struct drm_gem_object *gem_obj =
2013 container_of(uobj, struct drm_gem_object, gemo_uvmobj);
2014 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
2015 struct drm_device *dev = obj->base.dev;
2016 struct drm_i915_private *dev_priv = dev->dev_private;
2017 voff_t byte_offset;
2018 pgoff_t page_offset;
2019 int ret = 0;
2020 	const bool write = ISSET(access_type, VM_PROT_WRITE);
2021
2022 byte_offset = (ufi->entry->offset + (vaddr - ufi->entry->start));
2023 KASSERT(byte_offset <= obj->base.size);
2024 page_offset = (byte_offset >> PAGE_SHIFT);
2025
2026 intel_runtime_pm_get(dev_priv);
2027
2028 /* Thanks, uvm, but we don't need this lock. */
2029 mutex_exit(uobj->vmobjlock);
2030
2031 ret = i915_mutex_lock_interruptible(dev);
2032 if (ret)
2033 goto out;
2034
2035 trace_i915_gem_object_fault(obj, page_offset, true, write);
2036
2037 ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
2038 if (ret)
2039 goto unlock;
2040
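	/* Access to snoopable pages through the GTT is incoherent. */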
2041 if ((obj->cache_level != I915_CACHE_NONE) && !HAS_LLC(dev)) {
2042 ret = -EINVAL;
2043 goto unlock;
2044 }
2045
2046 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
2047 if (ret)
2048 goto unlock;
2049
2050 ret = i915_gem_object_set_to_gtt_domain(obj, write);
2051 if (ret)
2052 goto unpin;
2053
2054 ret = i915_gem_object_get_fence(obj);
2055 if (ret)
2056 goto unpin;
2057
2058 obj->fault_mappable = true;
2059
2060 /* XXX errno NetBSD->Linux */
2061 ret = -i915_udv_fault(ufi, vaddr, pps, npages, centeridx, access_type,
2062 flags,
2063 (dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj)));
2064 unpin:
2065 i915_gem_object_ggtt_unpin(obj);
2066 unlock:
2067 mutex_unlock(&dev->struct_mutex);
2068 out:
2069 mutex_enter(uobj->vmobjlock);
2070 uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
2071 if (ret == -ERESTART)
2072 uvm_wait("i915flt");
2073
2074 /*
2075 * Remap EINTR to success, so that we return to userland.
2076 * On the way out, we'll deliver the signal, and if the signal
2077 * is not fatal then the user code which faulted will most likely
2078 * fault again, and we'll come back here for another try.
2079 */
2080 if (ret == -EINTR)
2081 ret = 0;
2082 /* XXX Deal with GPU hangs here... */
2083 intel_runtime_pm_put(dev_priv);
2084 /* XXX errno Linux->NetBSD */
2085 return -ret;
2086 }
2087
2088 /*
2089 * XXX i915_udv_fault is copypasta of udv_fault from uvm_device.c.
2090 *
2091 * XXX pmap_enter_default instead of pmap_enter because of a problem
2092 * with using weak aliases in kernel modules or something.
2093 */
2094 int pmap_enter_default(pmap_t, vaddr_t, paddr_t, vm_prot_t, unsigned);
2095
2096 static int
2097 i915_udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
2098 int npages, int centeridx, vm_prot_t access_type, int flags,
2099 paddr_t gtt_paddr)
2100 {
2101 struct vm_map_entry *entry = ufi->entry;
2102 vaddr_t curr_va;
2103 off_t curr_offset;
2104 paddr_t paddr;
2105 u_int mmapflags;
2106 int lcv, retval;
2107 vm_prot_t mapprot;
2108 UVMHIST_FUNC("i915_udv_fault"); UVMHIST_CALLED(maphist);
2109 UVMHIST_LOG(maphist," flags=%jd", flags,0,0,0);
2110
2111 /*
2112 * we do not allow device mappings to be mapped copy-on-write
2113 * so we kill any attempt to do so here.
2114 */
2115
2116 if (UVM_ET_ISCOPYONWRITE(entry)) {
2117 UVMHIST_LOG(maphist, "<- failed -- COW entry (etype=0x%jx)",
2118 entry->etype, 0,0,0);
2119 return(EIO);
2120 }
2121
2122 /*
2123 * now we must determine the offset in udv to use and the VA to
2124 * use for pmap_enter. note that we always use orig_map's pmap
2125 * for pmap_enter (even if we have a submap). since virtual
2126 * addresses in a submap must match the main map, this is ok.
2127 */
2128
2129 /* udv offset = (offset from start of entry) + entry's offset */
2130 curr_offset = entry->offset + (vaddr - entry->start);
2131 /* pmap va = vaddr (virtual address of pps[0]) */
2132 curr_va = vaddr;
2133
2134 /*
2135 * loop over the page range entering in as needed
2136 */
2137
2138 retval = 0;
2139 for (lcv = 0 ; lcv < npages ; lcv++, curr_offset += PAGE_SIZE,
2140 curr_va += PAGE_SIZE) {
2141 if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
2142 continue;
2143
2144 if (pps[lcv] == PGO_DONTCARE)
2145 continue;
2146
2147 paddr = (gtt_paddr + curr_offset);
2148 mmapflags = 0;
2149 mapprot = ufi->entry->protection;
2150 		UVMHIST_LOG(maphist,
2151 		    "  MAPPING: device: pm=%#jx, va=%#jx, pa=%#jx, at=%jd",
2152 		    (uintptr_t)ufi->orig_map->pmap, curr_va, paddr, mapprot);
2153 if (pmap_enter_default(ufi->orig_map->pmap, curr_va, paddr, mapprot,
2154 PMAP_CANFAIL | mapprot | mmapflags) != 0) {
2155 /*
2156 * pmap_enter() didn't have the resource to
2157 * enter this mapping. Unlock everything,
2158 * wait for the pagedaemon to free up some
2159 * pages, and then tell uvm_fault() to start
2160 * the fault again.
2161 *
2162 * XXX Needs some rethinking for the PGO_ALLPAGES
2163 * XXX case.
2164 */
2165 pmap_update(ufi->orig_map->pmap); /* sync what we have so far */
2166 return (ERESTART);
2167 }
2168 }
2169
2170 pmap_update(ufi->orig_map->pmap);
2171 return (retval);
2172 }
2173 #else
2174 /**
2175 * i915_gem_fault - fault a page into the GTT
2176 * @vma: VMA in question
2177 * @vmf: fault info
2178 *
2179 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
2180 * from userspace. The fault handler takes care of binding the object to
2181 * the GTT (if needed), allocating and programming a fence register (again,
2182 * only if needed based on whether the old reg is still valid or the object
2183 * is tiled) and inserting a new PTE into the faulting process.
2184 *
2185 * Note that the faulting process may involve evicting existing objects
2186 * from the GTT and/or fence registers to make room. So performance may
2187 * suffer if the GTT working set is large or there are few fence registers
2188 * left.
2189 */
2190 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2191 {
2192 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
2193 struct drm_device *dev = obj->base.dev;
2194 struct drm_i915_private *dev_priv = dev->dev_private;
2195 struct i915_ggtt_view view = i915_ggtt_view_normal;
2196 pgoff_t page_offset;
2197 unsigned long pfn;
2198 int ret = 0;
2199 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
2200
2201 intel_runtime_pm_get(dev_priv);
2202
2203 /* We don't use vmf->pgoff since that has the fake offset */
2204 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
2205 PAGE_SHIFT;
2206
2207 ret = i915_mutex_lock_interruptible(dev);
2208 if (ret)
2209 goto out;
2210
2211 trace_i915_gem_object_fault(obj, page_offset, true, write);
2212
2213 /* Try to flush the object off the GPU first without holding the lock.
2214 * Upon reacquiring the lock, we will perform our sanity checks and then
2215 * repeat the flush holding the lock in the normal manner to catch cases
2216 * where we are gazumped.
2217 */
2218 ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
2219 if (ret)
2220 goto unlock;
2221
2222 /* Access to snoopable pages through the GTT is incoherent. */
2223 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
2224 ret = -EFAULT;
2225 goto unlock;
2226 }
2227
2228 /* Use a partial view if the object is bigger than the aperture. */
2229 if (obj->base.size >= dev_priv->gtt.mappable_end &&
2230 obj->tiling_mode == I915_TILING_NONE) {
2231 		static const unsigned int chunk_size = 256; /* pages; 1 MiB with 4 KiB pages */
2232
2233 memset(&view, 0, sizeof(view));
2234 view.type = I915_GGTT_VIEW_PARTIAL;
2235 view.params.partial.offset = rounddown(page_offset, chunk_size);
2236 view.params.partial.size =
2237 min_t(unsigned int,
2238 chunk_size,
2239 (vma->vm_end - vma->vm_start)/PAGE_SIZE -
2240 view.params.partial.offset);
2241 }
2242
2243 /* Now pin it into the GTT if needed */
2244 ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
2245 if (ret)
2246 goto unlock;
2247
2248 ret = i915_gem_object_set_to_gtt_domain(obj, write);
2249 if (ret)
2250 goto unpin;
2251
2252 ret = i915_gem_object_get_fence(obj);
2253 if (ret)
2254 goto unpin;
2255
2256 /* Finally, remap it using the new GTT offset */
2257 pfn = dev_priv->gtt.mappable_base +
2258 i915_gem_obj_ggtt_offset_view(obj, &view);
2259 pfn >>= PAGE_SHIFT;
2260
2261 if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
2262 		/* Overriding existing pages in the partial view does not
2263 		 * cause us any trouble as TLBs are still valid because the
2264 		 * fault is due to userspace losing part of the mapping or
2265 		 * never having accessed it before (in this partial view's range).
2266 		 */
2267 unsigned long base = vma->vm_start +
2268 (view.params.partial.offset << PAGE_SHIFT);
2269 unsigned int i;
2270
2271 for (i = 0; i < view.params.partial.size; i++) {
2272 ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
2273 if (ret)
2274 break;
2275 }
2276
2277 obj->fault_mappable = true;
2278 } else {
2279 if (!obj->fault_mappable) {
2280 unsigned long size = min_t(unsigned long,
2281 vma->vm_end - vma->vm_start,
2282 obj->base.size);
2283 int i;
2284
2285 for (i = 0; i < size >> PAGE_SHIFT; i++) {
2286 ret = vm_insert_pfn(vma,
2287 (unsigned long)vma->vm_start + i * PAGE_SIZE,
2288 pfn + i);
2289 if (ret)
2290 break;
2291 }
2292
2293 obj->fault_mappable = true;
2294 } else
2295 ret = vm_insert_pfn(vma,
2296 (unsigned long)vmf->virtual_address,
2297 pfn + page_offset);
2298 }
2299 unpin:
2300 i915_gem_object_ggtt_unpin_view(obj, &view);
2301 unlock:
2302 mutex_unlock(&dev->struct_mutex);
2303 out:
2304 switch (ret) {
2305 case -EIO:
2306 /*
2307 * We eat errors when the gpu is terminally wedged to avoid
2308 * userspace unduly crashing (gl has no provisions for mmaps to
2309 * fail). But any other -EIO isn't ours (e.g. swap in failure)
2310 * and so needs to be reported.
2311 */
2312 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
2313 ret = VM_FAULT_SIGBUS;
2314 break;
2315 }
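		/* FALLTHROUGH: a wedged GPU is handled like -EAGAIN below. */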
2316 case -EAGAIN:
2317 /*
2318 * EAGAIN means the gpu is hung and we'll wait for the error
2319 * handler to reset everything when re-faulting in
2320 * i915_mutex_lock_interruptible.
2321 */
2322 case 0:
2323 case -ERESTARTSYS:
2324 case -EINTR:
2325 case -EBUSY:
2326 /*
2327 * EBUSY is ok: this just means that another thread
2328 * already did the job.
2329 */
2330 ret = VM_FAULT_NOPAGE;
2331 break;
2332 case -ENOMEM:
2333 ret = VM_FAULT_OOM;
2334 break;
2335 case -ENOSPC:
2336 case -EFAULT:
2337 ret = VM_FAULT_SIGBUS;
2338 break;
2339 default:
2340 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
2341 ret = VM_FAULT_SIGBUS;
2342 break;
2343 }
2344
2345 intel_runtime_pm_put(dev_priv);
2346 return ret;
2347 }
2348 #endif
2349
2350 /**
2351 * i915_gem_release_mmap - remove physical page mappings
2352 * @obj: obj in question
2353 *
2354 * Preserve the reservation of the mmapping with the DRM core code, but
2355 * relinquish ownership of the pages back to the system.
2356 *
2357 * It is vital that we remove the page mapping if we have mapped a tiled
2358 * object through the GTT and then lose the fence register due to
2359 * resource pressure. Similarly if the object has been moved out of the
2360 * aperture, then pages mapped into userspace must be revoked. Removing the
2361 * mapping will then trigger a page fault on the next user access, allowing
2362 * fixup by i915_gem_fault().
2363 */
2364 void
2365 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
2366 {
2367 if (!obj->fault_mappable)
2368 return;
2369
2370 #ifdef __NetBSD__ /* XXX gem gtt fault */
2371 {
2372 struct drm_device *const dev = obj->base.dev;
2373 struct drm_i915_private *const dev_priv = dev->dev_private;
2374 const paddr_t start = dev_priv->gtt.mappable_base +
2375 i915_gem_obj_ggtt_offset(obj);
2376 const size_t size = obj->base.size;
2377 const paddr_t end = start + size;
2378 paddr_t pa;
2379
2380 KASSERT((start & (PAGE_SIZE - 1)) == 0);
2381 KASSERT((size & (PAGE_SIZE - 1)) == 0);
2382
2383 for (pa = start; pa < end; pa += PAGE_SIZE)
2384 pmap_pv_protect(pa, VM_PROT_NONE);
2385 }
2386 #else
2387 drm_vma_node_unmap(&obj->base.vma_node,
2388 obj->base.dev->anon_inode->i_mapping);
2389 #endif
2390 obj->fault_mappable = false;
2391 }
2392
2393 void
2394 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
2395 {
2396 struct drm_i915_gem_object *obj;
2397
2398 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
2399 i915_gem_release_mmap(obj);
2400 }
2401
2402 uint32_t
2403 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
2404 {
2405 uint32_t gtt_size;
2406
2407 if (INTEL_INFO(dev)->gen >= 4 ||
2408 tiling_mode == I915_TILING_NONE)
2409 return size;
2410
2411 /* Previous chips need a power-of-two fence region when tiling */
2412 if (INTEL_INFO(dev)->gen == 3)
2413 gtt_size = 1024*1024;
2414 else
2415 gtt_size = 512*1024;
2416
2417 while (gtt_size < size)
2418 gtt_size <<= 1;
2419
2420 return gtt_size;
2421 }
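
/*
 * Worked example (illustrative): on gen3 the fence-region walk starts at
 * 1 MiB, so a 300 KiB tiled object yields gtt_size = 1 MiB, while a
 * 1.5 MiB tiled object doubles once to 2 MiB.  On gen2 the walk starts
 * at 512 KiB instead.
 */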
2422
2423 /**
2424 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
2425 * @dev: DRM device; @size: object size; @tiling_mode: tiling mode;
2426 * @fenced: whether fenced (tiled) alignment is required
2427 * Return the required GTT alignment for an object, taking into account
2428 * potential fence register mapping.
2429 */
2430 uint32_t
2431 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
2432 int tiling_mode, bool fenced)
2433 {
2434 /*
2435 * Minimum alignment is 4k (GTT page size), but might be greater
2436 * if a fence register is needed for the object.
2437 */
2438 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
2439 tiling_mode == I915_TILING_NONE)
2440 return 4096;
2441
2442 /*
2443 * Previous chips need to be aligned to the size of the smallest
2444 * fence register that can contain the object.
2445 */
2446 return i915_gem_get_gtt_size(dev, size, tiling_mode);
2447 }
2448
2449 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2450 {
2451 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2452 int ret;
2453
2454 if (drm_vma_node_has_offset(&obj->base.vma_node))
2455 return 0;
2456
2457 dev_priv->mm.shrinker_no_lock_stealing = true;
2458
2459 ret = drm_gem_create_mmap_offset(&obj->base);
2460 if (ret != -ENOSPC)
2461 goto out;
2462
2463 	/* Badly fragmented mmap space? The only way we can recover
2464 	 * space is by destroying unwanted objects. We can't randomly release
2465 	 * mmap_offsets as userspace expects them to be persistent for the
2466 	 * lifetime of the objects. The closest we can do is to release the
2467 	 * offsets on purgeable objects by truncating them and marking them
2468 	 * purged, which prevents userspace from ever using those objects again.
2469 */
2470 i915_gem_shrink(dev_priv,
2471 obj->base.size >> PAGE_SHIFT,
2472 I915_SHRINK_BOUND |
2473 I915_SHRINK_UNBOUND |
2474 I915_SHRINK_PURGEABLE);
2475 ret = drm_gem_create_mmap_offset(&obj->base);
2476 if (ret != -ENOSPC)
2477 goto out;
2478
2479 i915_gem_shrink_all(dev_priv);
2480 ret = drm_gem_create_mmap_offset(&obj->base);
2481 out:
2482 dev_priv->mm.shrinker_no_lock_stealing = false;
2483
2484 return ret;
2485 }
2486
2487 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2488 {
2489 drm_gem_free_mmap_offset(&obj->base);
2490 }
2491
2492 int
2493 i915_gem_mmap_gtt(struct drm_file *file,
2494 struct drm_device *dev,
2495 uint32_t handle,
2496 uint64_t *offset)
2497 {
2498 struct drm_gem_object *gobj;
2499 struct drm_i915_gem_object *obj;
2500 int ret;
2501
2502 ret = i915_mutex_lock_interruptible(dev);
2503 if (ret)
2504 return ret;
2505
2506 gobj = drm_gem_object_lookup(dev, file, handle);
2507 if (gobj == NULL) {
2508 ret = -ENOENT;
2509 goto unlock;
2510 }
2511 obj = to_intel_bo(gobj);
2512
2513 if (obj->madv != I915_MADV_WILLNEED) {
2514 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
2515 ret = -EFAULT;
2516 goto out;
2517 }
2518
2519 ret = i915_gem_object_create_mmap_offset(obj);
2520 if (ret)
2521 goto out;
2522
2523 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2524
2525 out:
2526 drm_gem_object_unreference(&obj->base);
2527 unlock:
2528 mutex_unlock(&dev->struct_mutex);
2529 return ret;
2530 }
2531
2532 /**
2533 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2534 * @dev: DRM device
2535 * @data: GTT mapping ioctl data
2536 * @file: DRM file whose GEM handle table the handle is looked up in
2537 *
2538 * Simply returns the fake offset to userspace so it can mmap it.
2539 * The mmap call will end up in drm_gem_mmap(), which will set things
2540 * up so we can get faults in the handler above.
2541 *
2542 * The fault handler will take care of binding the object into the GTT
2543 * (since it may have been evicted to make room for something), allocating
2544 * a fence register, and mapping the appropriate aperture address into
2545 * userspace.
2546 */
2547 int
2548 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2549 struct drm_file *file)
2550 {
2551 struct drm_i915_gem_mmap_gtt *args = data;
2552
2553 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2554 }
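
/*
 * Illustrative userspace sketch (not part of this file): the two-step
 * flow this ioctl enables.  drmIoctl() and the uapi struct layout are
 * assumptions, not defined here.
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *		ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, arg.offset);	// fake offset from above
 */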
2555
2556 /* Immediately discard the backing storage */
2557 static void
2558 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2559 {
2560 i915_gem_object_free_mmap_offset(obj);
2561
2562 if (obj->base.filp == NULL)
2563 return;
2564
2565 #ifdef __NetBSD__
2566 {
2567 struct uvm_object *const uobj = obj->base.filp;
2568
2569 if (uobj != NULL) {
2570 /* XXX Calling pgo_put like this is bogus. */
2571 mutex_enter(uobj->vmobjlock);
2572 (*uobj->pgops->pgo_put)(uobj, 0, obj->base.size,
2573 (PGO_ALLPAGES | PGO_FREE));
2574 }
2575 }
2576 #else
2577 	/* Our goal here is to return as much of the memory as possible
2578 	 * back to the system, since we are called from OOM.
2579 * To do this we must instruct the shmfs to drop all of its
2580 * backing pages, *now*.
2581 */
2582 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2583 #endif
2584 obj->madv = __I915_MADV_PURGED;
2585 }
2586
2587 /* Try to discard unwanted pages */
2588 static void
2589 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2590 {
2591 #ifdef __NetBSD__
2592 struct uvm_object *uobj;
2593 #else
2594 struct address_space *mapping;
2595 #endif
2596
2597 switch (obj->madv) {
2598 case I915_MADV_DONTNEED:
2599 i915_gem_object_truncate(obj);
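		/* FALLTHROUGH: a truncated object is treated as purged. */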
2600 case __I915_MADV_PURGED:
2601 return;
2602 }
2603
2604 if (obj->base.filp == NULL)
2605 return;
2606
2607 #ifdef __NetBSD__
2608 uobj = obj->base.filp;
2609 mutex_enter(uobj->vmobjlock);
2610 (*uobj->pgops->pgo_put)(uobj, 0, obj->base.size,
2611 PGO_ALLPAGES|PGO_DEACTIVATE|PGO_CLEANIT);
2612 #else
2613 	mapping = file_inode(obj->base.filp)->i_mapping;
2614 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2615 #endif
2616 }
2617
2618 static void
2619 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2620 {
2621 #ifdef __NetBSD__
2622 struct drm_device *const dev = obj->base.dev;
2623 struct vm_page *page;
2624 int ret;
2625
2626 /* XXX Cargo-culted from the Linux code. */
2627 BUG_ON(obj->madv == __I915_MADV_PURGED);
2628
2629 ret = i915_gem_object_set_to_cpu_domain(obj, true);
2630 if (ret) {
2631 WARN_ON(ret != -EIO);
2632 i915_gem_clflush_object(obj, true);
2633 obj->base.read_domains = obj->base.write_domain =
2634 I915_GEM_DOMAIN_CPU;
2635 }
2636
2637 i915_gem_gtt_finish_object(obj);
2638
2639 if (i915_gem_object_needs_bit17_swizzle(obj))
2640 i915_gem_object_save_bit_17_swizzle(obj);
2641
2642 if (obj->madv == I915_MADV_DONTNEED)
2643 obj->dirty = 0;
2644
2645 if (obj->dirty) {
2646 mutex_enter(obj->base.filp->vmobjlock);
2647 TAILQ_FOREACH(page, &obj->pageq, pageq.queue) {
2648 uvm_pagemarkdirty(page, UVM_PAGE_STATUS_DIRTY);
2649 /* XXX mark page accessed */
2650 }
2651 mutex_exit(obj->base.filp->vmobjlock);
2652 }
2653 obj->dirty = 0;
2654
2655 uvm_obj_unwirepages(obj->base.filp, 0, obj->base.size);
2656 bus_dmamap_destroy(dev->dmat, obj->pages);
2657 #else
2658 struct sg_page_iter sg_iter;
2659 int ret;
2660
2661 BUG_ON(obj->madv == __I915_MADV_PURGED);
2662
2663 ret = i915_gem_object_set_to_cpu_domain(obj, true);
2664 if (ret) {
2665 /* In the event of a disaster, abandon all caches and
2666 * hope for the best.
2667 */
2668 WARN_ON(ret != -EIO);
2669 i915_gem_clflush_object(obj, true);
2670 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2671 }
2672
2673 i915_gem_gtt_finish_object(obj);
2674
2675 if (i915_gem_object_needs_bit17_swizzle(obj))
2676 i915_gem_object_save_bit_17_swizzle(obj);
2677
2678 if (obj->madv == I915_MADV_DONTNEED)
2679 obj->dirty = 0;
2680
2681 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
2682 struct page *page = sg_page_iter_page(&sg_iter);
2683
2684 if (obj->dirty)
2685 set_page_dirty(page);
2686
2687 if (obj->madv == I915_MADV_WILLNEED)
2688 mark_page_accessed(page);
2689
2690 page_cache_release(page);
2691 }
2692 obj->dirty = 0;
2693
2694 sg_free_table(obj->pages);
2695 kfree(obj->pages);
2696 #endif
2697 }
2698
2699 int
2700 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2701 {
2702 const struct drm_i915_gem_object_ops *ops = obj->ops;
2703
2704 if (obj->pages == NULL)
2705 return 0;
2706
2707 if (obj->pages_pin_count)
2708 return -EBUSY;
2709
2710 BUG_ON(i915_gem_obj_bound_any(obj));
2711
2712 /* ->put_pages might need to allocate memory for the bit17 swizzle
2713 * array, hence protect them from being reaped by removing them from gtt
2714 * lists early. */
2715 list_del(&obj->global_list);
2716
2717 ops->put_pages(obj);
2718 obj->pages = NULL;
2719
2720 i915_gem_object_invalidate(obj);
2721
2722 return 0;
2723 }
2724
2725 static int
2726 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2727 {
2728 #ifdef __NetBSD__
2729 struct drm_device *const dev = obj->base.dev;
2730 struct drm_i915_private *dev_priv = dev->dev_private;
2731 struct vm_page *page;
2732 int ret;
2733
2734 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2735 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2736
2737 KASSERT(obj->pages == NULL);
2738 TAILQ_INIT(&obj->pageq);
2739
2740 /* XXX errno NetBSD->Linux */
2741 ret = -bus_dmamap_create(dev->dmat, obj->base.size,
2742 obj->base.size/PAGE_SIZE, PAGE_SIZE, 0, BUS_DMA_NOWAIT,
2743 &obj->pages);
2744 if (ret)
2745 goto fail0;
2746
2747 /* XXX errno NetBSD->Linux */
2748 ret = -uvm_obj_wirepages(obj->base.filp, 0, obj->base.size,
2749 &obj->pageq);
2750 if (ret) /* XXX Try purge, shrink. */
2751 goto fail1;
2752
2753 /*
2754 * Check that the paddrs will fit in 40 bits, or 32 bits on i965.
2755 *
2756 * XXX This should be unnecessary: the uao should guarantee
2757 * this constraint after uao_set_pgfl.
2758 *
2759 * XXX This should also be expanded for newer devices.
2760 */
2761 TAILQ_FOREACH(page, &obj->pageq, pageq.queue) {
2762 const uint64_t mask =
2763 (IS_BROADWATER(dev) || IS_CRESTLINE(dev)?
2764 0xffffffffULL : 0xffffffffffULL);
2765 if (VM_PAGE_TO_PHYS(page) & ~mask) {
2766 DRM_ERROR("GEM physical address exceeds %u bits"
2767 ": %"PRIxMAX"\n",
2768 popcount64(mask),
2769 (uintmax_t)VM_PAGE_TO_PHYS(page));
2770 ret = -EIO;
2771 goto fail2;
2772 }
2773 }
2774
2775 ret = i915_gem_gtt_prepare_object(obj);
2776 if (ret)
2777 goto fail2;
2778
2779 if (i915_gem_object_needs_bit17_swizzle(obj))
2780 i915_gem_object_do_bit_17_swizzle(obj);
2781
2782 if (obj->tiling_mode != I915_TILING_NONE &&
2783 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2784 i915_gem_object_pin_pages(obj);
2785
2786 /* Success! */
2787 return 0;
2788
2789 fail3: __unused
2790 i915_gem_gtt_finish_object(obj);
2791 fail2: uvm_obj_unwirepages(obj->base.filp, 0, obj->base.size);
2792 fail1: bus_dmamap_destroy(dev->dmat, obj->pages);
2793 obj->pages = NULL;
2794 fail0: KASSERT(ret);
2795 return ret;
2796 #else
2797 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2798 int page_count, i;
2799 struct address_space *mapping;
2800 struct sg_table *st;
2801 struct scatterlist *sg;
2802 struct sg_page_iter sg_iter;
2803 struct page *page;
2804 unsigned long last_pfn = 0; /* suppress gcc warning */
2805 int ret;
2806 gfp_t gfp;
2807
2808 /* Assert that the object is not currently in any GPU domain. As it
2809 * wasn't in the GTT, there shouldn't be any way it could have been in
2810 * a GPU cache
2811 */
2812 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2813 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2814
2815 st = kmalloc(sizeof(*st), GFP_KERNEL);
2816 if (st == NULL)
2817 return -ENOMEM;
2818
2819 page_count = obj->base.size / PAGE_SIZE;
2820 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2821 kfree(st);
2822 return -ENOMEM;
2823 }
2824
2825 /* Get the list of pages out of our struct file. They'll be pinned
2826 * at this point until we release them.
2827 *
2828 * Fail silently without starting the shrinker
2829 */
2830 mapping = file_inode(obj->base.filp)->i_mapping;
2831 gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
2832 gfp |= __GFP_NORETRY | __GFP_NOWARN;
2833 sg = st->sgl;
2834 st->nents = 0;
2835 for (i = 0; i < page_count; i++) {
2836 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2837 if (IS_ERR(page)) {
2838 i915_gem_shrink(dev_priv,
2839 page_count,
2840 I915_SHRINK_BOUND |
2841 I915_SHRINK_UNBOUND |
2842 I915_SHRINK_PURGEABLE);
2843 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2844 }
2845 if (IS_ERR(page)) {
2846 /* We've tried hard to allocate the memory by reaping
2847 * our own buffer, now let the real VM do its job and
2848 * go down in flames if truly OOM.
2849 */
2850 i915_gem_shrink_all(dev_priv);
2851 page = shmem_read_mapping_page(mapping, i);
2852 if (IS_ERR(page)) {
2853 ret = PTR_ERR(page);
2854 goto err_pages;
2855 }
2856 }
2857 #ifdef CONFIG_SWIOTLB
2858 if (swiotlb_nr_tbl()) {
2859 st->nents++;
2860 sg_set_page(sg, page, PAGE_SIZE, 0);
2861 sg = sg_next(sg);
2862 continue;
2863 }
2864 #endif
2865 if (!i || page_to_pfn(page) != last_pfn + 1) {
2866 if (i)
2867 sg = sg_next(sg);
2868 st->nents++;
2869 sg_set_page(sg, page, PAGE_SIZE, 0);
2870 } else {
2871 sg->length += PAGE_SIZE;
2872 }
2873 last_pfn = page_to_pfn(page);
2874
2875 /* Check that the i965g/gm workaround works. */
2876 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2877 }
2878 #ifdef CONFIG_SWIOTLB
2879 if (!swiotlb_nr_tbl())
2880 #endif
2881 sg_mark_end(sg);
2882 obj->pages = st;
2883
2884 ret = i915_gem_gtt_prepare_object(obj);
2885 if (ret)
2886 goto err_pages;
2887
2888 if (i915_gem_object_needs_bit17_swizzle(obj))
2889 i915_gem_object_do_bit_17_swizzle(obj);
2890
2891 if (obj->tiling_mode != I915_TILING_NONE &&
2892 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2893 i915_gem_object_pin_pages(obj);
2894
2895 return 0;
2896
2897 err_pages:
2898 sg_mark_end(sg);
2899 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
2900 page_cache_release(sg_page_iter_page(&sg_iter));
2901 sg_free_table(st);
2902 kfree(st);
2903
2904 /* shmemfs first checks if there is enough memory to allocate the page
2905 * and reports ENOSPC should there be insufficient, along with the usual
2906 * ENOMEM for a genuine allocation failure.
2907 *
2908 * We use ENOSPC in our driver to mean that we have run out of aperture
2909 * space and so want to translate the error from shmemfs back to our
2910 * usual understanding of ENOMEM.
2911 */
2912 if (ret == -ENOSPC)
2913 ret = -ENOMEM;
2914
2915 return ret;
2916 #endif
2917 }
2918
2919 /* Ensure that the associated pages are gathered from the backing storage
2920 * and pinned into our object. i915_gem_object_get_pages() may be called
2921 * multiple times before they are released by a single call to
2922 * i915_gem_object_put_pages() - once the pages are no longer referenced
2923 * either as a result of memory pressure (reaping pages under the shrinker)
2924 * or as the object is itself released.
2925 */
2926 int
2927 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2928 {
2929 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2930 const struct drm_i915_gem_object_ops *ops = obj->ops;
2931 int ret;
2932
2933 if (obj->pages)
2934 return 0;
2935
2936 if (obj->madv != I915_MADV_WILLNEED) {
2937 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2938 return -EFAULT;
2939 }
2940
2941 BUG_ON(obj->pages_pin_count);
2942
2943 ret = ops->get_pages(obj);
2944 if (ret)
2945 return ret;
2946
2947 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2948
2949 #ifndef __NetBSD__
2950 obj->get_page.sg = obj->pages->sgl;
2951 obj->get_page.last = 0;
2952 #endif
2953
2954 return 0;
2955 }
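
/*
 * Illustrative sketch (usage pattern only, not a new API): a caller that
 * needs the backing pages to stay resident across an operation pairs the
 * get with a pin, roughly:
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);
 *	...operate on obj->pages...
 *	i915_gem_object_unpin_pages(obj);
 */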
2956
2957 void i915_vma_move_to_active(struct i915_vma *vma,
2958 struct drm_i915_gem_request *req)
2959 {
2960 struct drm_i915_gem_object *obj = vma->obj;
2961 struct intel_engine_cs *ring;
2962
2963 ring = i915_gem_request_get_ring(req);
2964
2965 /* Add a reference if we're newly entering the active list. */
2966 if (obj->active == 0)
2967 drm_gem_object_reference(&obj->base);
2968 obj->active |= intel_ring_flag(ring);
2969
2970 list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
2971 i915_gem_request_assign(&obj->last_read_req[ring->id], req);
2972
2973 list_move_tail(&vma->mm_list, &vma->vm->active_list);
2974 }
2975
2976 static void
2977 i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
2978 {
2979 RQ_BUG_ON(obj->last_write_req == NULL);
2980 RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
2981
2982 i915_gem_request_assign(&obj->last_write_req, NULL);
2983 intel_fb_obj_flush(obj, true, ORIGIN_CS);
2984 }
2985
2986 static void
2987 i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
2988 {
2989 struct i915_vma *vma;
2990
2991 RQ_BUG_ON(obj->last_read_req[ring] == NULL);
2992 RQ_BUG_ON(!(obj->active & (1 << ring)));
2993
2994 list_del_init(&obj->ring_list[ring]);
2995 i915_gem_request_assign(&obj->last_read_req[ring], NULL);
2996
2997 if (obj->last_write_req && obj->last_write_req->ring->id == ring)
2998 i915_gem_object_retire__write(obj);
2999
3000 obj->active &= ~(1 << ring);
3001 if (obj->active)
3002 return;
3003
3004 /* Bump our place on the bound list to keep it roughly in LRU order
3005 * so that we don't steal from recently used but inactive objects
3006 * (unless we are forced to ofc!)
3007 */
3008 list_move_tail(&obj->global_list,
3009 &to_i915(obj->base.dev)->mm.bound_list);
3010
3011 list_for_each_entry(vma, &obj->vma_list, vma_link) {
3012 if (!list_empty(&vma->mm_list))
3013 list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
3014 }
3015
3016 i915_gem_request_assign(&obj->last_fenced_req, NULL);
3017 drm_gem_object_unreference(&obj->base);
3018 }
3019
3020 static int
3021 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
3022 {
3023 struct drm_i915_private *dev_priv = dev->dev_private;
3024 struct intel_engine_cs *ring;
3025 int ret, i, j;
3026
3027 /* Carefully retire all requests without writing to the rings */
3028 for_each_ring(ring, dev_priv, i) {
3029 ret = intel_ring_idle(ring);
3030 if (ret)
3031 return ret;
3032 }
3033 i915_gem_retire_requests(dev);
3034
3035 /* Finally reset hw state */
3036 for_each_ring(ring, dev_priv, i) {
3037 intel_ring_init_seqno(ring, seqno);
3038
3039 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
3040 ring->semaphore.sync_seqno[j] = 0;
3041 }
3042
3043 return 0;
3044 }
3045
3046 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
3047 {
3048 struct drm_i915_private *dev_priv = dev->dev_private;
3049 int ret;
3050
3051 if (seqno == 0)
3052 return -EINVAL;
3053
3054 	/* The seqno in the HWS page needs to be set to one less than
3055 	 * what we will inject into the ring.
3056 	 */
3057 ret = i915_gem_init_seqno(dev, seqno - 1);
3058 if (ret)
3059 return ret;
3060
3061 /* Carefully set the last_seqno value so that wrap
3062 * detection still works
3063 */
3064 dev_priv->next_seqno = seqno;
3065 dev_priv->last_seqno = seqno - 1;
3066 if (dev_priv->last_seqno == 0)
3067 dev_priv->last_seqno--;
3068
3069 return 0;
3070 }
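
/*
 * Worked example (illustrative): i915_gem_set_seqno(dev, 1) leaves
 * next_seqno = 1 and last_seqno = 0; since 0 is reserved, last_seqno is
 * decremented and wraps to 0xffffffff so that wrap detection still works.
 */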
3071
3072 int
3073 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
3074 {
3075 struct drm_i915_private *dev_priv = dev->dev_private;
3076
3077 /* reserve 0 for non-seqno */
3078 if (dev_priv->next_seqno == 0) {
3079 int ret = i915_gem_init_seqno(dev, 0);
3080 if (ret)
3081 return ret;
3082
3083 dev_priv->next_seqno = 1;
3084 }
3085
3086 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
3087 return 0;
3088 }
3089
3090 /*
3091 * NB: This function is not allowed to fail. Doing so would mean the
3092 * request is not being tracked for completion but the work itself is
3093 * going to happen on the hardware. This would be a Bad Thing(tm).
3094 */
3095 void __i915_add_request(struct drm_i915_gem_request *request,
3096 struct drm_i915_gem_object *obj,
3097 bool flush_caches)
3098 {
3099 struct intel_engine_cs *ring;
3100 struct drm_i915_private *dev_priv;
3101 struct intel_ringbuffer *ringbuf;
3102 u32 request_start;
3103 int ret;
3104
3105 if (WARN_ON(request == NULL))
3106 return;
3107
3108 ring = request->ring;
3109 dev_priv = ring->dev->dev_private;
3110 ringbuf = request->ringbuf;
3111
3112 /*
3113 * To ensure that this call will not fail, space for its emissions
3114 * should already have been reserved in the ring buffer. Let the ring
3115 * know that it is time to use that space up.
3116 */
3117 intel_ring_reserved_space_use(ringbuf);
3118
3119 request_start = intel_ring_get_tail(ringbuf);
3120 /*
3121 * Emit any outstanding flushes - execbuf can fail to emit the flush
3122 * after having emitted the batchbuffer command. Hence we need to fix
3123 * things up similar to emitting the lazy request. The difference here
3124 * is that the flush _must_ happen before the next request, no matter
3125 * what.
3126 */
3127 if (flush_caches) {
3128 if (i915.enable_execlists)
3129 ret = logical_ring_flush_all_caches(request);
3130 else
3131 ret = intel_ring_flush_all_caches(request);
3132 /* Not allowed to fail! */
3133 WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
3134 }
3135
3136 /* Record the position of the start of the request so that
3137 * should we detect the updated seqno part-way through the
3138 * GPU processing the request, we never over-estimate the
3139 * position of the head.
3140 */
3141 request->postfix = intel_ring_get_tail(ringbuf);
3142
3143 if (i915.enable_execlists)
3144 ret = ring->emit_request(request);
3145 else {
3146 ret = ring->add_request(request);
3147
3148 request->tail = intel_ring_get_tail(ringbuf);
3149 }
3150 /* Not allowed to fail! */
3151 WARN(ret, "emit|add_request failed: %d!\n", ret);
3152
3153 request->head = request_start;
3154
3155 /* Whilst this request exists, batch_obj will be on the
3156 * active_list, and so will hold the active reference. Only when this
3157 	 * request is retired will the batch_obj be moved onto the
3158 * inactive_list and lose its active reference. Hence we do not need
3159 * to explicitly hold another reference here.
3160 */
3161 request->batch_obj = obj;
3162
3163 request->emitted_jiffies = jiffies;
3164 request->previous_seqno = ring->last_submitted_seqno;
3165 ring->last_submitted_seqno = request->seqno;
3166 list_add_tail(&request->list, &ring->request_list);
3167
3168 trace_i915_gem_request_add(request);
3169
3170 i915_queue_hangcheck(ring->dev);
3171
3172 queue_delayed_work(dev_priv->wq,
3173 &dev_priv->mm.retire_work,
3174 round_jiffies_up_relative(HZ));
3175 intel_mark_busy(dev_priv->dev);
3176
3177 /* Sanity check that the reserved size was large enough. */
3178 intel_ring_reserved_space_end(ringbuf);
3179 }
3180
3181 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
3182 const struct intel_context *ctx)
3183 {
3184 unsigned long elapsed;
3185
3186 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
3187
3188 if (ctx->hang_stats.banned)
3189 return true;
3190
3191 if (ctx->hang_stats.ban_period_seconds &&
3192 elapsed <= ctx->hang_stats.ban_period_seconds) {
3193 if (!i915_gem_context_is_default(ctx)) {
3194 DRM_DEBUG("context hanging too fast, banning!\n");
3195 return true;
3196 } else if (i915_stop_ring_allow_ban(dev_priv)) {
3197 if (i915_stop_ring_allow_warn(dev_priv))
3198 DRM_ERROR("gpu hanging too fast, banning!\n");
3199 return true;
3200 }
3201 }
3202
3203 return false;
3204 }
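
/*
 * Illustrative example: a context found guilty of a second hang within
 * ban_period_seconds of its previous one is banned; unconditionally for
 * non-default contexts, while the default context is only banned when
 * the stop_rings setting allows it.
 */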
3205
3206 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
3207 struct intel_context *ctx,
3208 const bool guilty)
3209 {
3210 struct i915_ctx_hang_stats *hs;
3211
3212 if (WARN_ON(!ctx))
3213 return;
3214
3215 hs = &ctx->hang_stats;
3216
3217 if (guilty) {
3218 hs->banned = i915_context_is_banned(dev_priv, ctx);
3219 hs->batch_active++;
3220 hs->guilty_ts = get_seconds();
3221 } else {
3222 hs->batch_pending++;
3223 }
3224 }
3225
3226 void i915_gem_request_free(struct kref *req_ref)
3227 {
3228 struct drm_i915_gem_request *req = container_of(req_ref,
3229 typeof(*req), ref);
3230 struct intel_context *ctx = req->ctx;
3231
3232 if (req->file_priv)
3233 i915_gem_request_remove_from_client(req);
3234
3235 if (ctx) {
3236 if (i915.enable_execlists) {
3237 if (ctx != req->ring->default_context)
3238 intel_lr_context_unpin(req);
3239 }
3240
3241 i915_gem_context_unreference(ctx);
3242 }
3243
3244 kmem_cache_free(req->i915->requests, req);
3245 }
3246
3247 int i915_gem_request_alloc(struct intel_engine_cs *ring,
3248 struct intel_context *ctx,
3249 struct drm_i915_gem_request **req_out)
3250 {
3251 struct drm_i915_private *dev_priv = to_i915(ring->dev);
3252 struct drm_i915_gem_request *req;
3253 int ret;
3254
3255 if (!req_out)
3256 return -EINVAL;
3257
3258 *req_out = NULL;
3259
3260 req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
3261 if (req == NULL)
3262 return -ENOMEM;
3263
3264 ret = i915_gem_get_seqno(ring->dev, &req->seqno);
3265 if (ret)
3266 goto err;
3267
3268 kref_init(&req->ref);
3269 req->i915 = dev_priv;
3270 req->ring = ring;
3271 req->ctx = ctx;
3272 i915_gem_context_reference(req->ctx);
3273
3274 if (i915.enable_execlists)
3275 ret = intel_logical_ring_alloc_request_extras(req);
3276 else
3277 ret = intel_ring_alloc_request_extras(req);
3278 if (ret) {
3279 i915_gem_context_unreference(req->ctx);
3280 goto err;
3281 }
3282
3283 /*
3284 * Reserve space in the ring buffer for all the commands required to
3285 * eventually emit this request. This is to guarantee that the
3286 * i915_add_request() call can't fail. Note that the reserve may need
3287 * to be redone if the request is not actually submitted straight
3288 * away, e.g. because a GPU scheduler has deferred it.
3289 */
3290 if (i915.enable_execlists)
3291 ret = intel_logical_ring_reserve_space(req);
3292 else
3293 ret = intel_ring_reserve_space(req);
3294 if (ret) {
3295 /*
3296 * At this point, the request is fully allocated even if not
3297 * fully prepared. Thus it can be cleaned up using the proper
3298 * free code.
3299 */
3300 i915_gem_request_cancel(req);
3301 return ret;
3302 }
3303
3304 *req_out = req;
3305 return 0;
3306
3307 err:
3308 kmem_cache_free(dev_priv->requests, req);
3309 return ret;
3310 }
3311
3312 void i915_gem_request_cancel(struct drm_i915_gem_request *req)
3313 {
3314 intel_ring_reserved_space_cancel(req->ringbuf);
3315
3316 i915_gem_request_unreference(req);
3317 }
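
/*
 * Illustrative sketch (the pattern used by i915_gpu_idle() below):
 * allocate a request, emit work against it, then either submit or cancel
 * it so the ring-space reservation is always resolved.
 *
 *	struct drm_i915_gem_request *req;
 *	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
 *	if (ret)
 *		return ret;
 *	ret = emit_some_work(req);	// hypothetical work emission
 *	if (ret) {
 *		i915_gem_request_cancel(req);
 *		return ret;
 *	}
 *	i915_add_request_no_flush(req);
 */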
3318
3319 struct drm_i915_gem_request *
3320 i915_gem_find_active_request(struct intel_engine_cs *ring)
3321 {
3322 struct drm_i915_gem_request *request;
3323
3324 list_for_each_entry(request, &ring->request_list, list) {
3325 if (i915_gem_request_completed(request, false))
3326 continue;
3327
3328 return request;
3329 }
3330
3331 return NULL;
3332 }
3333
3334 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
3335 struct intel_engine_cs *ring)
3336 {
3337 struct drm_i915_gem_request *request;
3338 bool ring_hung;
3339
3340 request = i915_gem_find_active_request(ring);
3341
3342 if (request == NULL)
3343 return;
3344
3345 ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
3346
3347 i915_set_reset_status(dev_priv, request->ctx, ring_hung);
3348
3349 list_for_each_entry_continue(request, &ring->request_list, list)
3350 i915_set_reset_status(dev_priv, request->ctx, false);
3351 }
3352
3353 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
3354 struct intel_engine_cs *ring)
3355 {
3356 while (!list_empty(&ring->active_list)) {
3357 struct drm_i915_gem_object *obj;
3358
3359 obj = list_first_entry(&ring->active_list,
3360 struct drm_i915_gem_object,
3361 ring_list[ring->id]);
3362
3363 i915_gem_object_retire__read(obj, ring->id);
3364 }
3365
3366 /*
3367 * Clear the execlists queue up before freeing the requests, as those
3368 * are the ones that keep the context and ringbuffer backing objects
3369 * pinned in place.
3370 */
3371 while (!list_empty(&ring->execlist_queue)) {
3372 struct drm_i915_gem_request *submit_req;
3373
3374 submit_req = list_first_entry(&ring->execlist_queue,
3375 struct drm_i915_gem_request,
3376 execlist_link);
3377 list_del(&submit_req->execlist_link);
3378
3379 if (submit_req->ctx != ring->default_context)
3380 intel_lr_context_unpin(submit_req);
3381
3382 i915_gem_request_unreference(submit_req);
3383 }
3384
3385 /*
3386 * We must free the requests after all the corresponding objects have
3387 * been moved off active lists. Which is the same order as the normal
3388 	 * retire_requests function does. This is important because objects
3389 	 * can hold implicit references on things like e.g. ppgtt address
3390 	 * spaces through the request.
3391 */
3392 while (!list_empty(&ring->request_list)) {
3393 struct drm_i915_gem_request *request;
3394
3395 request = list_first_entry(&ring->request_list,
3396 struct drm_i915_gem_request,
3397 list);
3398
3399 i915_gem_request_retire(request);
3400 }
3401 }
3402
3403 void i915_gem_reset(struct drm_device *dev)
3404 {
3405 struct drm_i915_private *dev_priv = dev->dev_private;
3406 struct intel_engine_cs *ring;
3407 int i;
3408
3409 /*
3410 * Before we free the objects from the requests, we need to inspect
3411 * them for finding the guilty party. As the requests only borrow
3412 * their reference to the objects, the inspection must be done first.
3413 */
3414 for_each_ring(ring, dev_priv, i)
3415 i915_gem_reset_ring_status(dev_priv, ring);
3416
3417 for_each_ring(ring, dev_priv, i)
3418 i915_gem_reset_ring_cleanup(dev_priv, ring);
3419
3420 i915_gem_context_reset(dev);
3421
3422 i915_gem_restore_fences(dev);
3423
3424 WARN_ON(i915_verify_lists(dev));
3425 }
3426
3427 /**
3428 * This function clears the request list as sequence numbers are passed.
3429 */
3430 void
3431 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
3432 {
3433 WARN_ON(i915_verify_lists(ring->dev));
3434
3435 	/* Retire requests first, since the active-list walk below depends
3436 	 * on it.  If we retired requests last, we might use a later seqno
3437 	 * and so clear the request list without clearing the active list,
3438 	 * leading to confusion.
3439 	 */
3440 while (!list_empty(&ring->request_list)) {
3441 struct drm_i915_gem_request *request;
3442
3443 request = list_first_entry(&ring->request_list,
3444 struct drm_i915_gem_request,
3445 list);
3446
3447 if (!i915_gem_request_completed(request, true))
3448 break;
3449
3450 i915_gem_request_retire(request);
3451 }
3452
3453 /* Move any buffers on the active list that are no longer referenced
3454 * by the ringbuffer to the flushing/inactive lists as appropriate,
3455 * before we free the context associated with the requests.
3456 */
3457 while (!list_empty(&ring->active_list)) {
3458 struct drm_i915_gem_object *obj;
3459
3460 obj = list_first_entry(&ring->active_list,
3461 struct drm_i915_gem_object,
3462 ring_list[ring->id]);
3463
3464 if (!list_empty(&obj->last_read_req[ring->id]->list))
3465 break;
3466
3467 i915_gem_object_retire__read(obj, ring->id);
3468 }
3469
3470 if (unlikely(ring->trace_irq_req &&
3471 i915_gem_request_completed(ring->trace_irq_req, true))) {
3472 ring->irq_put(ring);
3473 i915_gem_request_assign(&ring->trace_irq_req, NULL);
3474 }
3475
3476 WARN_ON(i915_verify_lists(ring->dev));
3477 }
3478
3479 bool
3480 i915_gem_retire_requests(struct drm_device *dev)
3481 {
3482 struct drm_i915_private *dev_priv = dev->dev_private;
3483 struct intel_engine_cs *ring;
3484 bool idle = true;
3485 int i;
3486
3487 for_each_ring(ring, dev_priv, i) {
3488 i915_gem_retire_requests_ring(ring);
3489 idle &= list_empty(&ring->request_list);
3490 if (i915.enable_execlists) {
3491 unsigned long flags;
3492
3493 spin_lock_irqsave(&ring->execlist_lock, flags);
3494 idle &= list_empty(&ring->execlist_queue);
3495 spin_unlock_irqrestore(&ring->execlist_lock, flags);
3496
3497 intel_execlists_retire_requests(ring);
3498 }
3499 }
3500
3501 if (idle)
3502 mod_delayed_work(dev_priv->wq,
3503 &dev_priv->mm.idle_work,
3504 msecs_to_jiffies(100));
3505
3506 return idle;
3507 }
3508
3509 static void
3510 i915_gem_retire_work_handler(struct work_struct *work)
3511 {
3512 struct drm_i915_private *dev_priv =
3513 container_of(work, typeof(*dev_priv), mm.retire_work.work);
3514 struct drm_device *dev = dev_priv->dev;
3515 bool idle;
3516
3517 /* Come back later if the device is busy... */
3518 idle = false;
3519 if (mutex_trylock(&dev->struct_mutex)) {
3520 idle = i915_gem_retire_requests(dev);
3521 mutex_unlock(&dev->struct_mutex);
3522 }
3523 if (!idle)
3524 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
3525 round_jiffies_up_relative(HZ));
3526 }
3527
3528 static void
3529 i915_gem_idle_work_handler(struct work_struct *work)
3530 {
3531 struct drm_i915_private *dev_priv =
3532 container_of(work, typeof(*dev_priv), mm.idle_work.work);
3533 struct drm_device *dev = dev_priv->dev;
3534 struct intel_engine_cs *ring;
3535 int i;
3536
3537 for_each_ring(ring, dev_priv, i)
3538 if (!list_empty(&ring->request_list))
3539 return;
3540
3541 intel_mark_idle(dev);
3542
3543 if (mutex_trylock(&dev->struct_mutex)) {
3544 struct intel_engine_cs *ring;
3545 int i;
3546
3547 for_each_ring(ring, dev_priv, i)
3548 i915_gem_batch_pool_fini(&ring->batch_pool);
3549
3550 mutex_unlock(&dev->struct_mutex);
3551 }
3552 }
3553
3554 /**
3555 * Ensures that an object will eventually get non-busy by flushing any required
3556 * write domains, emitting any outstanding lazy request and retiring any
3557 * completed requests.
3558 */
3559 static int
3560 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
3561 {
3562 int i;
3563
3564 if (!obj->active)
3565 return 0;
3566
3567 for (i = 0; i < I915_NUM_RINGS; i++) {
3568 struct drm_i915_gem_request *req;
3569
3570 req = obj->last_read_req[i];
3571 if (req == NULL)
3572 continue;
3573
3574 if (list_empty(&req->list))
3575 goto retire;
3576
3577 if (i915_gem_request_completed(req, true)) {
3578 __i915_gem_request_retire__upto(req);
3579 retire:
3580 i915_gem_object_retire__read(obj, i);
3581 }
3582 }
3583
3584 return 0;
3585 }
3586
3587 /**
3588 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
3589 * @DRM_IOCTL_ARGS: standard ioctl arguments
3590 *
3591 * Returns 0 if successful, else an error is returned with the remaining time in
3592 * the timeout parameter.
3593 * -ETIME: object is still busy after timeout
3594 * -ERESTARTSYS: signal interrupted the wait
3595 * -ENOENT: object doesn't exist
3596 * Also possible, but rare:
3597 * -EAGAIN: GPU wedged
3598 * -ENOMEM: damn
3599 * -ENODEV: Internal IRQ fail
3600 * -E?: The add request failed
3601 *
3602 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
3603 * non-zero timeout parameter the wait ioctl will wait for the given number of
3604 * nanoseconds on an object becoming unbusy. Since the wait itself does so
3605 * without holding struct_mutex the object may become re-busied before this
3606 * function completes. A similar but shorter race condition exists in the busy
3607 * ioctl.
3608 */
3609 int
3610 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3611 {
3612 struct drm_i915_private *dev_priv = dev->dev_private;
3613 struct drm_i915_gem_wait *args = data;
3614 struct drm_gem_object *gobj;
3615 struct drm_i915_gem_object *obj;
3616 struct drm_i915_gem_request *req[I915_NUM_RINGS];
3617 unsigned reset_counter;
3618 int i, n = 0;
3619 int ret;
3620
3621 if (args->flags != 0)
3622 return -EINVAL;
3623
3624 ret = i915_mutex_lock_interruptible(dev);
3625 if (ret)
3626 return ret;
3627
3628 gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
3629 if (gobj == NULL) {
3630 mutex_unlock(&dev->struct_mutex);
3631 return -ENOENT;
3632 }
3633 obj = to_intel_bo(gobj);
3634
3635 /* Need to make sure the object gets inactive eventually. */
3636 ret = i915_gem_object_flush_active(obj);
3637 if (ret)
3638 goto out;
3639
3640 if (!obj->active)
3641 goto out;
3642
3643 /* Do this after OLR check to make sure we make forward progress polling
3644 * on this IOCTL with a timeout == 0 (like busy ioctl)
3645 */
3646 if (args->timeout_ns == 0) {
3647 ret = -ETIME;
3648 goto out;
3649 }
3650
3651 drm_gem_object_unreference(&obj->base);
3652 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3653
3654 for (i = 0; i < I915_NUM_RINGS; i++) {
3655 if (obj->last_read_req[i] == NULL)
3656 continue;
3657
3658 req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
3659 }
3660
3661 mutex_unlock(&dev->struct_mutex);
3662
3663 for (i = 0; i < n; i++) {
3664 if (ret == 0)
3665 ret = __i915_wait_request(req[i], reset_counter, true,
3666 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
3667 file->driver_priv);
3668 i915_gem_request_unreference__unlocked(req[i]);
3669 }
3670 return ret;
3671
3672 out:
3673 drm_gem_object_unreference(&obj->base);
3674 mutex_unlock(&dev->struct_mutex);
3675 return ret;
3676 }
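
/*
 * Illustrative userspace sketch (not part of the driver): polling an
 * object with a zero timeout, as described above.  drmIoctl() and the
 * uapi struct layout are assumptions, not defined in this file.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 0,	// 0 reimplements the busy ioctl
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) != 0 &&
 *	    errno == ETIME) {
 *		// object still busy
 *	}
 */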
3677
3678 static int
3679 __i915_gem_object_sync(struct drm_i915_gem_object *obj,
3680 struct intel_engine_cs *to,
3681 struct drm_i915_gem_request *from_req,
3682 struct drm_i915_gem_request **to_req)
3683 {
3684 struct intel_engine_cs *from;
3685 int ret;
3686
3687 from = i915_gem_request_get_ring(from_req);
3688 if (to == from)
3689 return 0;
3690
3691 if (i915_gem_request_completed(from_req, true))
3692 return 0;
3693
3694 if (!i915_semaphore_is_enabled(obj->base.dev)) {
3695 struct drm_i915_private *i915 = to_i915(obj->base.dev);
3696 ret = __i915_wait_request(from_req,
3697 atomic_read(&i915->gpu_error.reset_counter),
3698 i915->mm.interruptible,
3699 NULL,
3700 &i915->rps.semaphores);
3701 if (ret)
3702 return ret;
3703
3704 i915_gem_object_retire_request(obj, from_req);
3705 } else {
3706 int idx = intel_ring_sync_index(from, to);
3707 u32 seqno = i915_gem_request_get_seqno(from_req);
3708
3709 WARN_ON(!to_req);
3710
3711 if (seqno <= from->semaphore.sync_seqno[idx])
3712 return 0;
3713
3714 if (*to_req == NULL) {
3715 ret = i915_gem_request_alloc(to, to->default_context, to_req);
3716 if (ret)
3717 return ret;
3718 }
3719
3720 trace_i915_gem_ring_sync_to(*to_req, from, from_req);
3721 ret = to->semaphore.sync_to(*to_req, from, seqno);
3722 if (ret)
3723 return ret;
3724
3725 /* We use last_read_req because sync_to()
3726 * might have just caused seqno wrap under
3727 * the radar.
3728 */
3729 from->semaphore.sync_seqno[idx] =
3730 i915_gem_request_get_seqno(obj->last_read_req[from->id]);
3731 }
3732
3733 return 0;
3734 }
3735
3736 /**
3737 * i915_gem_object_sync - sync an object to a ring.
3738 *
3739 * @obj: object which may be in use on another ring.
3740 * @to: ring we wish to use the object on. May be NULL.
3741 * @to_req: request we wish to use the object for. See below.
3742 * This will be allocated and returned if a request is
3743 * required but not passed in.
3744 *
3745 * This code is meant to abstract object synchronization with the GPU.
3746 * Calling with NULL implies synchronizing the object with the CPU
3747 * rather than a particular GPU ring. Conceptually we serialise writes
3748 * between engines inside the GPU. We only allow one engine to write
3749 * into a buffer at any time, but multiple readers. To ensure each has
3750 * a coherent view of memory, we must:
3751 *
3752 * - If there is an outstanding write request to the object, the new
3753 * request must wait for it to complete (either CPU or in hw, requests
3754 * on the same ring will be naturally ordered).
3755 *
3756 * - If we are a write request (pending_write_domain is set), the new
3757 * request must wait for outstanding read requests to complete.
3758 *
3759 * For CPU synchronisation (NULL to) no request is required. For syncing with
3760 * rings to_req must be non-NULL. However, a request does not have to be
3761 * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
3762 * request will be allocated automatically and returned through *to_req. Note
3763 * that it is not guaranteed that commands will be emitted (because the system
3764 * might already be idle). Hence there is no need to create a request that
3765 * might never have any work submitted. Note further that if a request is
3766 * returned in *to_req, it is the responsibility of the caller to submit
3767 * that request (after potentially adding more work to it).
3768 *
3769 * Returns 0 if successful, else propagates up the lower layer error.
3770 */
3771 int
3772 i915_gem_object_sync(struct drm_i915_gem_object *obj,
3773 struct intel_engine_cs *to,
3774 struct drm_i915_gem_request **to_req)
3775 {
3776 const bool readonly = obj->base.pending_write_domain == 0;
3777 struct drm_i915_gem_request *req[I915_NUM_RINGS];
3778 int ret, i, n;
3779
3780 if (!obj->active)
3781 return 0;
3782
3783 if (to == NULL)
3784 return i915_gem_object_wait_rendering(obj, readonly);
3785
3786 n = 0;
3787 if (readonly) {
3788 if (obj->last_write_req)
3789 req[n++] = obj->last_write_req;
3790 } else {
3791 for (i = 0; i < I915_NUM_RINGS; i++)
3792 if (obj->last_read_req[i])
3793 req[n++] = obj->last_read_req[i];
3794 }
3795 for (i = 0; i < n; i++) {
3796 ret = __i915_gem_object_sync(obj, to, req[i], to_req);
3797 if (ret)
3798 return ret;
3799 }
3800
3801 return 0;
3802 }
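
/*
 * Illustrative sketch (usage pattern from the contract above): syncing
 * obj for use on ring `to', and submitting the request only if one was
 * actually allocated for semaphore emission:
 *
 *	struct drm_i915_gem_request *rq = NULL;
 *	ret = i915_gem_object_sync(obj, to, &rq);
 *	if (ret)
 *		return ret;
 *	if (rq)
 *		i915_add_request_no_flush(rq);
 */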
3803
3804 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
3805 {
3806 u32 old_write_domain, old_read_domains;
3807
3808 /* Force a pagefault for domain tracking on next user access */
3809 i915_gem_release_mmap(obj);
3810
3811 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3812 return;
3813
3814 /* Wait for any direct GTT access to complete */
3815 mb();
3816
3817 old_read_domains = obj->base.read_domains;
3818 old_write_domain = obj->base.write_domain;
3819
3820 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
3821 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
3822
3823 trace_i915_gem_object_change_domain(obj,
3824 old_read_domains,
3825 old_write_domain);
3826 }
3827
3828 static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
3829 {
3830 struct drm_i915_gem_object *obj = vma->obj;
3831 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3832 int ret;
3833
3834 if (list_empty(&vma->vma_link))
3835 return 0;
3836
3837 if (!drm_mm_node_allocated(&vma->node)) {
3838 i915_gem_vma_destroy(vma);
3839 return 0;
3840 }
3841
3842 if (vma->pin_count)
3843 return -EBUSY;
3844
3845 BUG_ON(obj->pages == NULL);
3846
3847 if (wait) {
3848 ret = i915_gem_object_wait_rendering(obj, false);
3849 if (ret)
3850 return ret;
3851 }
3852
3853 if (i915_is_ggtt(vma->vm) &&
3854 vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3855 i915_gem_object_finish_gtt(obj);
3856
3857 /* release the fence reg _after_ flushing */
3858 ret = i915_gem_object_put_fence(obj);
3859 if (ret)
3860 return ret;
3861 }
3862
3863 trace_i915_vma_unbind(vma);
3864
3865 vma->vm->unbind_vma(vma);
3866 vma->bound = 0;
3867
3868 list_del_init(&vma->mm_list);
3869 if (i915_is_ggtt(vma->vm)) {
3870 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
3871 obj->map_and_fenceable = false;
3872 } else if (vma->ggtt_view.pages) {
3873 #ifdef __NetBSD__
3874 panic("rotated/partial views can't happen");
3875 #else
3876 sg_free_table(vma->ggtt_view.pages);
3877 kfree(vma->ggtt_view.pages);
3878 #endif
3879 }
3880 vma->ggtt_view.pages = NULL;
3881 }
3882
3883 drm_mm_remove_node(&vma->node);
3884 i915_gem_vma_destroy(vma);
3885
3886 /* Since the unbound list is global, only move to that list if
3887 * no more VMAs exist. */
3888 if (list_empty(&obj->vma_list))
3889 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
3890
3891 	/* And finally, now that the object is completely decoupled from
3892 	 * this vma, we can drop its hold on the backing storage and allow
3893 	 * it to be reaped by the shrinker.
3894 	 */
3895 i915_gem_object_unpin_pages(obj);
3896
3897 return 0;
3898 }
3899
3900 int i915_vma_unbind(struct i915_vma *vma)
3901 {
3902 return __i915_vma_unbind(vma, true);
3903 }
3904
3905 int __i915_vma_unbind_no_wait(struct i915_vma *vma)
3906 {
3907 return __i915_vma_unbind(vma, false);
3908 }
3909
3910 int i915_gpu_idle(struct drm_device *dev)
3911 {
3912 struct drm_i915_private *dev_priv = dev->dev_private;
3913 struct intel_engine_cs *ring;
3914 int ret, i;
3915
3916 /* Flush everything onto the inactive list. */
3917 for_each_ring(ring, dev_priv, i) {
3918 if (!i915.enable_execlists) {
3919 struct drm_i915_gem_request *req;
3920
3921 ret = i915_gem_request_alloc(ring, ring->default_context, &req);
3922 if (ret)
3923 return ret;
3924
3925 ret = i915_switch_context(req);
3926 if (ret) {
3927 i915_gem_request_cancel(req);
3928 return ret;
3929 }
3930
3931 i915_add_request_no_flush(req);
3932 }
3933
3934 ret = intel_ring_idle(ring);
3935 if (ret)
3936 return ret;
3937 }
3938
3939 WARN_ON(i915_verify_lists(dev));
3940 return 0;
3941 }
3942
3943 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
3944 unsigned long cache_level)
3945 {
3946 struct drm_mm_node *gtt_space = &vma->node;
3947 struct drm_mm_node *other;
3948
3949 /*
3950 * On some machines we have to be careful when putting differing types
3951 * of snoopable memory together to avoid the prefetcher crossing memory
3952 * domains and dying. During vm initialisation, we decide whether or not
3953 * these constraints apply and set the drm_mm.color_adjust
3954 * appropriately.
3955 */
3956 if (vma->vm->mm.color_adjust == NULL)
3957 return true;
3958
3959 if (!drm_mm_node_allocated(gtt_space))
3960 return true;
3961
3962 	if (list_empty(&gtt_space->node_list))
3963 return true;
3964
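	/*
	 * Otherwise check the nodes on either side of us: a neighbouring
	 * allocation with a different cache colour must be separated from
	 * this node by a guard hole, or the prefetcher may cross domains.
	 */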
3965 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3966 if (other->allocated && !other->hole_follows && other->color != cache_level)
3967 return false;
3968
3969 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3970 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3971 return false;
3972
3973 return true;
3974 }
3975
3976 /**
3977 * Finds free space in the GTT aperture and binds the object or a view of it
3978 * there.
3979 */
3980 static struct i915_vma *
3981 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3982 struct i915_address_space *vm,
3983 const struct i915_ggtt_view *ggtt_view,
3984 unsigned alignment,
3985 uint64_t flags)
3986 {
3987 struct drm_device *dev = obj->base.dev;
3988 struct drm_i915_private *dev_priv = dev->dev_private;
3989 u32 fence_alignment, unfenced_alignment;
3990 u32 search_flag, alloc_flag;
3991 u64 start, end;
3992 u64 size, fence_size;
3993 struct i915_vma *vma;
3994 int ret;
3995
3996 if (i915_is_ggtt(vm)) {
3997 u32 view_size;
3998
3999 if (WARN_ON(!ggtt_view))
4000 return ERR_PTR(-EINVAL);
4001
4002 view_size = i915_ggtt_view_size(obj, ggtt_view);
4003
4004 fence_size = i915_gem_get_gtt_size(dev,
4005 view_size,
4006 obj->tiling_mode);
4007 fence_alignment = i915_gem_get_gtt_alignment(dev,
4008 view_size,
4009 obj->tiling_mode,
4010 true);
4011 unfenced_alignment = i915_gem_get_gtt_alignment(dev,
4012 view_size,
4013 obj->tiling_mode,
4014 false);
4015 size = flags & PIN_MAPPABLE ? fence_size : view_size;
4016 } else {
4017 fence_size = i915_gem_get_gtt_size(dev,
4018 obj->base.size,
4019 obj->tiling_mode);
4020 fence_alignment = i915_gem_get_gtt_alignment(dev,
4021 obj->base.size,
4022 obj->tiling_mode,
4023 true);
4024 unfenced_alignment =
4025 i915_gem_get_gtt_alignment(dev,
4026 obj->base.size,
4027 obj->tiling_mode,
4028 false);
4029 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
4030 }
4031
4032 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
4033 end = vm->total;
4034 if (flags & PIN_MAPPABLE)
4035 end = min_t(u64, end, dev_priv->gtt.mappable_end);
4036 if (flags & PIN_ZONE_4G)
4037 end = min_t(u64, end, (1ULL << 32));
4038
4039 if (alignment == 0)
4040 alignment = flags & PIN_MAPPABLE ? fence_alignment :
4041 unfenced_alignment;
4042 if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
4043 DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
4044 ggtt_view ? ggtt_view->type : 0,
4045 alignment);
4046 return ERR_PTR(-EINVAL);
4047 }
4048
4049 /* If binding the object/GGTT view requires more space than the entire
4050 * aperture has, reject it early before evicting everything in a vain
4051 * attempt to find space.
4052 */
4053 if (size > end) {
4054 DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%"PRIx64" > %s aperture=%"PRIx64"\n",
4055 ggtt_view ? ggtt_view->type : 0,
4056 size,
4057 flags & PIN_MAPPABLE ? "mappable" : "total",
4058 end);
4059 return ERR_PTR(-E2BIG);
4060 }
4061
4062 ret = i915_gem_object_get_pages(obj);
4063 if (ret)
4064 return ERR_PTR(ret);
4065
4066 i915_gem_object_pin_pages(obj);
4067
4068 vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
4069 i915_gem_obj_lookup_or_create_vma(obj, vm);
4070
4071 if (IS_ERR(vma))
4072 goto err_unpin;
4073
4074 if (flags & PIN_HIGH) {
4075 search_flag = DRM_MM_SEARCH_BELOW;
4076 alloc_flag = DRM_MM_CREATE_TOP;
4077 } else {
4078 search_flag = DRM_MM_SEARCH_DEFAULT;
4079 alloc_flag = DRM_MM_CREATE_DEFAULT;
4080 }
4081
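	/*
	 * Find a free range in the VM; if the allocator fails, evict
	 * something from the requested range and retry until insertion
	 * succeeds or eviction itself gives up.
	 */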
4082 search_free:
4083 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
4084 size, alignment,
4085 obj->cache_level,
4086 start, end,
4087 search_flag,
4088 alloc_flag);
4089 if (ret) {
4090 ret = i915_gem_evict_something(dev, vm, size, alignment,
4091 obj->cache_level,
4092 start, end,
4093 flags);
4094 if (ret == 0)
4095 goto search_free;
4096
4097 goto err_free_vma;
4098 }
4099 if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
4100 ret = -EINVAL;
4101 goto err_remove_node;
4102 }
4103
4104 trace_i915_vma_bind(vma, flags);
4105 ret = i915_vma_bind(vma, obj->cache_level, flags);
4106 if (ret)
4107 goto err_remove_node;
4108
4109 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
4110 list_add_tail(&vma->mm_list, &vm->inactive_list);
4111
4112 return vma;
4113
4114 err_remove_node:
4115 drm_mm_remove_node(&vma->node);
4116 err_free_vma:
4117 i915_gem_vma_destroy(vma);
4118 vma = ERR_PTR(ret);
4119 err_unpin:
4120 i915_gem_object_unpin_pages(obj);
4121 return vma;
4122 }
4123
4124 bool
4125 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
4126 bool force)
4127 {
4128 /* If we don't have a page list set up, then we're not pinned
4129 * to GPU, and we can ignore the cache flush because it'll happen
4130 * again at bind time.
4131 */
4132 if (obj->pages == NULL)
4133 return false;
4134
4135 /*
4136 * Stolen memory is always coherent with the GPU as it is explicitly
4137 	 * marked as WC by the system, or the system is cache-coherent.
4138 */
4139 if (obj->stolen || obj->phys_handle)
4140 return false;
4141
4142 /* If the GPU is snooping the contents of the CPU cache,
4143 * we do not need to manually clear the CPU cache lines. However,
4144 * the caches are only snooped when the render cache is
4145 * flushed/invalidated. As we always have to emit invalidations
4146 * and flushes when moving into and out of the RENDER domain, correct
4147 * snooping behaviour occurs naturally as the result of our domain
4148 * tracking.
4149 */
4150 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
4151 obj->cache_dirty = true;
4152 return false;
4153 }
4154
4155 trace_i915_gem_object_clflush(obj);
4156 #ifdef __NetBSD__
4157 drm_clflush_pglist(&obj->pageq);
4158 #else
4159 drm_clflush_sg(obj->pages);
4160 #endif
4161 obj->cache_dirty = false;
4162
4163 return true;
4164 }
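/*
 * A typical call site (sketch, mirroring the flush helpers below): flush
 * the object's pages before handing them to a non-coherent consumer, and
 * flush the chipset only if any cachelines were actually written back:
 *
 *	if (i915_gem_clflush_object(obj, false))
 *		i915_gem_chipset_flush(obj->base.dev);
 */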
4165
4166 /** Flushes the GTT write domain for the object if it's dirty. */
4167 static void
4168 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
4169 {
4170 uint32_t old_write_domain;
4171
4172 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
4173 return;
4174
4175 /* No actual flushing is required for the GTT write domain. Writes
4176 * to it immediately go to main memory as far as we know, so there's
4177 * no chipset flush. It also doesn't land in render cache.
4178 *
4179 * However, we do have to enforce the order so that all writes through
4180 * the GTT land before any writes to the device, such as updates to
4181 * the GATT itself.
4182 */
4183 wmb();
4184
4185 old_write_domain = obj->base.write_domain;
4186 obj->base.write_domain = 0;
4187
4188 intel_fb_obj_flush(obj, false, ORIGIN_GTT);
4189
4190 trace_i915_gem_object_change_domain(obj,
4191 obj->base.read_domains,
4192 old_write_domain);
4193 }
4194
4195 /** Flushes the CPU write domain for the object if it's dirty. */
4196 static void
4197 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
4198 {
4199 uint32_t old_write_domain;
4200
4201 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
4202 return;
4203
4204 if (i915_gem_clflush_object(obj, obj->pin_display))
4205 i915_gem_chipset_flush(obj->base.dev);
4206
4207 old_write_domain = obj->base.write_domain;
4208 obj->base.write_domain = 0;
4209
4210 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
4211
4212 trace_i915_gem_object_change_domain(obj,
4213 obj->base.read_domains,
4214 old_write_domain);
4215 }
4216
4217 /**
4218 * Moves a single object to the GTT read, and possibly write domain.
4219 *
4220 * This function returns when the move is complete, including waiting on
4221 * flushes to occur.
4222 */
4223 int
4224 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
4225 {
4226 uint32_t old_write_domain, old_read_domains;
4227 struct i915_vma *vma;
4228 int ret;
4229
4230 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
4231 return 0;
4232
4233 ret = i915_gem_object_wait_rendering(obj, !write);
4234 if (ret)
4235 return ret;
4236
4237 /* Flush and acquire obj->pages so that we are coherent through
4238 * direct access in memory with previous cached writes through
4239 * shmemfs and that our cache domain tracking remains valid.
4240 * For example, if the obj->filp was moved to swap without us
4241 * being notified and releasing the pages, we would mistakenly
4242 * continue to assume that the obj remained out of the CPU cached
4243 * domain.
4244 */
4245 ret = i915_gem_object_get_pages(obj);
4246 if (ret)
4247 return ret;
4248
4249 i915_gem_object_flush_cpu_write_domain(obj);
4250
4251 /* Serialise direct access to this object with the barriers for
4252 * coherent writes from the GPU, by effectively invalidating the
4253 * GTT domain upon first access.
4254 */
4255 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
4256 mb();
4257
4258 old_write_domain = obj->base.write_domain;
4259 old_read_domains = obj->base.read_domains;
4260
4261 /* It should now be out of any other write domains, and we can update
4262 * the domain values for our changes.
4263 */
4264 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
4265 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
4266 if (write) {
4267 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
4268 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
4269 obj->dirty = 1;
4270 }
4271
4272 trace_i915_gem_object_change_domain(obj,
4273 old_read_domains,
4274 old_write_domain);
4275
4276 /* And bump the LRU for this access */
4277 vma = i915_gem_obj_to_ggtt(obj);
4278 if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
4279 list_move_tail(&vma->mm_list,
4280 &to_i915(obj->base.dev)->gtt.base.inactive_list);
4281
4282 return 0;
4283 }
4284
4285 /**
4286 * Changes the cache-level of an object across all VMA.
4287 *
4288 * After this function returns, the object will be in the new cache-level
4289 * across all GTT and the contents of the backing storage will be coherent,
4290 * with respect to the new cache-level. In order to keep the backing storage
4291 * coherent for all users, we only allow a single cache level to be set
4292 * globally on the object and prevent it from being changed whilst the
4293  * hardware is reading from the object. That is, if the object is currently
4294  * on the scanout, it will be set to uncached (or equivalent display
4295 * cache coherency) and all non-MOCS GPU access will also be uncached so
4296 * that all direct access to the scanout remains coherent.
4297 */
4298 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
4299 enum i915_cache_level cache_level)
4300 {
4301 struct drm_device *dev = obj->base.dev;
4302 struct i915_vma *vma, *next;
4303 bool bound = false;
4304 int ret = 0;
4305
4306 if (obj->cache_level == cache_level)
4307 goto out;
4308
4309 /* Inspect the list of currently bound VMA and unbind any that would
4310 * be invalid given the new cache-level. This is principally to
4311 * catch the issue of the CS prefetch crossing page boundaries and
4312 * reading an invalid PTE on older architectures.
4313 */
4314 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4315 if (!drm_mm_node_allocated(&vma->node))
4316 continue;
4317
4318 if (vma->pin_count) {
4319 DRM_DEBUG("can not change the cache level of pinned objects\n");
4320 return -EBUSY;
4321 }
4322
4323 if (!i915_gem_valid_gtt_space(vma, cache_level)) {
4324 ret = i915_vma_unbind(vma);
4325 if (ret)
4326 return ret;
4327 } else
4328 bound = true;
4329 }
4330
4331 /* We can reuse the existing drm_mm nodes but need to change the
4332 * cache-level on the PTE. We could simply unbind them all and
4333 * rebind with the correct cache-level on next use. However since
4334 	 * we already have a valid slot, dma mapping, pages etc, we may as
4335 	 * well rewrite the PTE in the belief that doing so tramples upon less
4336 * state and so involves less work.
4337 */
4338 if (bound) {
4339 /* Before we change the PTE, the GPU must not be accessing it.
4340 * If we wait upon the object, we know that all the bound
4341 * VMA are no longer active.
4342 */
4343 ret = i915_gem_object_wait_rendering(obj, false);
4344 if (ret)
4345 return ret;
4346
4347 if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
4348 /* Access to snoopable pages through the GTT is
4349 * incoherent and on some machines causes a hard
4350 			 * lockup. Relinquish the CPU mmapping to force
4351 * userspace to refault in the pages and we can
4352 * then double check if the GTT mapping is still
4353 * valid for that pointer access.
4354 */
4355 i915_gem_release_mmap(obj);
4356
4357 /* As we no longer need a fence for GTT access,
4358 * we can relinquish it now (and so prevent having
4359 * to steal a fence from someone else on the next
4360 * fence request). Note GPU activity would have
4361 * dropped the fence as all snoopable access is
4362 * supposed to be linear.
4363 */
4364 ret = i915_gem_object_put_fence(obj);
4365 if (ret)
4366 return ret;
4367 } else {
4368 /* We either have incoherent backing store and
4369 * so no GTT access or the architecture is fully
4370 * coherent. In such cases, existing GTT mmaps
4371 * ignore the cache bit in the PTE and we can
4372 * rewrite it without confusing the GPU or having
4373 * to force userspace to fault back in its mmaps.
4374 */
4375 }
4376
4377 list_for_each_entry(vma, &obj->vma_list, vma_link) {
4378 if (!drm_mm_node_allocated(&vma->node))
4379 continue;
4380
4381 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
4382 if (ret)
4383 return ret;
4384 }
4385 }
4386
4387 list_for_each_entry(vma, &obj->vma_list, vma_link)
4388 vma->node.color = cache_level;
4389 obj->cache_level = cache_level;
4390
4391 out:
4392 /* Flush the dirty CPU caches to the backing storage so that the
4393 * object is now coherent at its new cache level (with respect
4394 * to the access domain).
4395 */
4396 if (obj->cache_dirty &&
4397 obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
4398 cpu_write_needs_clflush(obj)) {
4399 if (i915_gem_clflush_object(obj, true))
4400 i915_gem_chipset_flush(obj->base.dev);
4401 }
4402
4403 return 0;
4404 }
4405
4406 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
4407 struct drm_file *file)
4408 {
4409 struct drm_i915_gem_caching *args = data;
4410 struct drm_gem_object *gobj;
4411 struct drm_i915_gem_object *obj;
4412
4413 gobj = drm_gem_object_lookup(dev, file, args->handle);
4414 if (gobj == NULL)
4415 return -ENOENT;
4416 obj = to_intel_bo(gobj);
4417
4418 switch (obj->cache_level) {
4419 case I915_CACHE_LLC:
4420 case I915_CACHE_L3_LLC:
4421 args->caching = I915_CACHING_CACHED;
4422 break;
4423
4424 case I915_CACHE_WT:
4425 args->caching = I915_CACHING_DISPLAY;
4426 break;
4427
4428 default:
4429 args->caching = I915_CACHING_NONE;
4430 break;
4431 }
4432
4433 drm_gem_object_unreference_unlocked(&obj->base);
4434 return 0;
4435 }
4436
4437 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
4438 struct drm_file *file)
4439 {
4440 struct drm_i915_private *dev_priv = dev->dev_private;
4441 struct drm_i915_gem_caching *args = data;
4442 struct drm_gem_object *gobj;
4443 struct drm_i915_gem_object *obj;
4444 enum i915_cache_level level;
4445 int ret;
4446
4447 switch (args->caching) {
4448 case I915_CACHING_NONE:
4449 level = I915_CACHE_NONE;
4450 break;
4451 case I915_CACHING_CACHED:
4452 /*
4453 * Due to a HW issue on BXT A stepping, GPU stores via a
4454 * snooped mapping may leave stale data in a corresponding CPU
4455 * cacheline, whereas normally such cachelines would get
4456 * invalidated.
4457 */
4458 if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)
4459 return -ENODEV;
4460
4461 level = I915_CACHE_LLC;
4462 break;
4463 case I915_CACHING_DISPLAY:
4464 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
4465 break;
4466 default:
4467 return -EINVAL;
4468 }
4469
4470 intel_runtime_pm_get(dev_priv);
4471
4472 ret = i915_mutex_lock_interruptible(dev);
4473 if (ret)
4474 goto rpm_put;
4475
4476 gobj = drm_gem_object_lookup(dev, file, args->handle);
4477 if (gobj == NULL) {
4478 ret = -ENOENT;
4479 goto unlock;
4480 }
4481 obj = to_intel_bo(gobj);
4482
4483 ret = i915_gem_object_set_cache_level(obj, level);
4484
4485 drm_gem_object_unreference(&obj->base);
4486 unlock:
4487 mutex_unlock(&dev->struct_mutex);
4488 rpm_put:
4489 intel_runtime_pm_put(dev_priv);
4490
4491 return ret;
4492 }
4493
4494 /*
4495 * Prepare buffer for display plane (scanout, cursors, etc).
4496 * Can be called from an uninterruptible phase (modesetting) and allows
4497 * any flushes to be pipelined (for pageflips).
4498 */
4499 int
4500 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
4501 u32 alignment,
4502 struct intel_engine_cs *pipelined,
4503 struct drm_i915_gem_request **pipelined_request,
4504 const struct i915_ggtt_view *view)
4505 {
4506 u32 old_read_domains, old_write_domain;
4507 int ret;
4508
4509 ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
4510 if (ret)
4511 return ret;
4512
4513 /* Mark the pin_display early so that we account for the
4514 * display coherency whilst setting up the cache domains.
4515 */
4516 obj->pin_display++;
4517
4518 /* The display engine is not coherent with the LLC cache on gen6. As
4519 * a result, we make sure that the pinning that is about to occur is
4520 	 * done with uncached PTEs. This is the lowest common denominator for all
4521 * chipsets.
4522 *
4523 * However for gen6+, we could do better by using the GFDT bit instead
4524 * of uncaching, which would allow us to flush all the LLC-cached data
4525 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
4526 */
4527 ret = i915_gem_object_set_cache_level(obj,
4528 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
4529 if (ret)
4530 goto err_unpin_display;
4531
4532 /* As the user may map the buffer once pinned in the display plane
4533 * (e.g. libkms for the bootup splash), we have to ensure that we
4534 * always use map_and_fenceable for all scanout buffers.
4535 */
4536 ret = i915_gem_object_ggtt_pin(obj, view, alignment,
4537 view->type == I915_GGTT_VIEW_NORMAL ?
4538 PIN_MAPPABLE : 0);
4539 if (ret)
4540 goto err_unpin_display;
4541
4542 i915_gem_object_flush_cpu_write_domain(obj);
4543
4544 old_write_domain = obj->base.write_domain;
4545 old_read_domains = obj->base.read_domains;
4546
4547 /* It should now be out of any other write domains, and we can update
4548 * the domain values for our changes.
4549 */
4550 obj->base.write_domain = 0;
4551 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
4552
4553 trace_i915_gem_object_change_domain(obj,
4554 old_read_domains,
4555 old_write_domain);
4556
4557 return 0;
4558
4559 err_unpin_display:
4560 obj->pin_display--;
4561 return ret;
4562 }
4563
4564 void
4565 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
4566 const struct i915_ggtt_view *view)
4567 {
4568 if (WARN_ON(obj->pin_display == 0))
4569 return;
4570
4571 i915_gem_object_ggtt_unpin_view(obj, view);
4572
4573 obj->pin_display--;
4574 }
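/*
 * Pin and unpin must be balanced.  A modeset path does, roughly:
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, align, ring,
 *						   &req, &view);
 *	... scan out from the object ...
 *	i915_gem_object_unpin_from_display_plane(obj, &view);
 *
 * (sketch only; real callers also handle frontbuffer tracking)
 */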
4575
4576 /**
4577 * Moves a single object to the CPU read, and possibly write domain.
4578 *
4579 * This function returns when the move is complete, including waiting on
4580 * flushes to occur.
4581 */
4582 int
4583 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
4584 {
4585 uint32_t old_write_domain, old_read_domains;
4586 int ret;
4587
4588 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
4589 return 0;
4590
4591 ret = i915_gem_object_wait_rendering(obj, !write);
4592 if (ret)
4593 return ret;
4594
4595 i915_gem_object_flush_gtt_write_domain(obj);
4596
4597 old_write_domain = obj->base.write_domain;
4598 old_read_domains = obj->base.read_domains;
4599
4600 /* Flush the CPU cache if it's still invalid. */
4601 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
4602 i915_gem_clflush_object(obj, false);
4603
4604 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
4605 }
4606
4607 /* It should now be out of any other write domains, and we can update
4608 * the domain values for our changes.
4609 */
4610 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
4611
4612 /* If we're writing through the CPU, then the GPU read domains will
4613 * need to be invalidated at next use.
4614 */
4615 if (write) {
4616 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4617 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4618 }
4619
4620 trace_i915_gem_object_change_domain(obj,
4621 old_read_domains,
4622 old_write_domain);
4623
4624 return 0;
4625 }
4626
4627 /* Throttle our rendering by waiting until the ring has completed our requests
4628 * emitted over 20 msec ago.
4629 *
4630 * Note that if we were to use the current jiffies each time around the loop,
4631 * we wouldn't escape the function with any frames outstanding if the time to
4632 * render a frame was over 20ms.
4633 *
4634 * This should get us reasonable parallelism between CPU and GPU but also
4635 * relatively low latency when blocking on a particular request to finish.
4636 */
4637 static int
4638 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4639 {
4640 struct drm_i915_private *dev_priv = dev->dev_private;
4641 struct drm_i915_file_private *file_priv = file->driver_priv;
4642 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
4643 struct drm_i915_gem_request *request, *target = NULL;
4644 unsigned reset_counter;
4645 int ret;
4646
4647 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
4648 if (ret)
4649 return ret;
4650
4651 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
4652 if (ret)
4653 return ret;
4654
4655 spin_lock(&file_priv->mm.lock);
4656 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
4657 if (time_after_eq(request->emitted_jiffies, recent_enough))
4658 break;
4659
4660 /*
4661 		 * Note that the request might not have been submitted yet,
4662 		 * in which case emitted_jiffies will be zero.
4663 */
4664 if (!request->emitted_jiffies)
4665 continue;
4666
4667 target = request;
4668 }
4669 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
4670 if (target)
4671 i915_gem_request_reference(target);
4672 spin_unlock(&file_priv->mm.lock);
4673
4674 if (target == NULL)
4675 return 0;
4676
4677 ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
4678 if (ret == 0)
4679 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
4680
4681 i915_gem_request_unreference__unlocked(target);
4682
4683 return ret;
4684 }
4685
4686 static bool
4687 i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
4688 {
4689 struct drm_i915_gem_object *obj = vma->obj;
4690
4691 if (alignment &&
4692 vma->node.start & (alignment - 1))
4693 return true;
4694
4695 if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
4696 return true;
4697
4698 if (flags & PIN_OFFSET_BIAS &&
4699 vma->node.start < (flags & PIN_OFFSET_MASK))
4700 return true;
4701
4702 return false;
4703 }
4704
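/*
 * A vma is "map_and_fenceable" when its GGTT node both satisfies the
 * fence constraints (fence-sized and fence-aligned) and lies wholly
 * within the mappable aperture, so the CPU can reach it through a
 * fenced GTT mapping.
 */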
4705 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
4706 {
4707 struct drm_i915_gem_object *obj = vma->obj;
4708 bool mappable, fenceable;
4709 u32 fence_size, fence_alignment;
4710
4711 fence_size = i915_gem_get_gtt_size(obj->base.dev,
4712 obj->base.size,
4713 obj->tiling_mode);
4714 fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
4715 obj->base.size,
4716 obj->tiling_mode,
4717 true);
4718
4719 fenceable = (vma->node.size == fence_size &&
4720 (vma->node.start & (fence_alignment - 1)) == 0);
4721
4722 mappable = (vma->node.start + fence_size <=
4723 to_i915(obj->base.dev)->gtt.mappable_end);
4724
4725 obj->map_and_fenceable = mappable && fenceable;
4726 }
4727
4728 static int
4729 i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
4730 struct i915_address_space *vm,
4731 const struct i915_ggtt_view *ggtt_view,
4732 uint32_t alignment,
4733 uint64_t flags)
4734 {
4735 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4736 struct i915_vma *vma;
4737 unsigned bound;
4738 int ret;
4739
4740 if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
4741 return -ENODEV;
4742
4743 if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
4744 return -EINVAL;
4745
4746 if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
4747 return -EINVAL;
4748
4749 if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
4750 return -EINVAL;
4751
4752 vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
4753 i915_gem_obj_to_vma(obj, vm);
4754
4755 if (IS_ERR(vma))
4756 return PTR_ERR(vma);
4757
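	/* NB: either lookup may return NULL when no vma exists yet for
	 * this vm/view; only the ggtt-view lookup can return an ERR_PTR
	 * (when no view was supplied). */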
4758 if (vma) {
4759 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
4760 return -EBUSY;
4761
4762 if (i915_vma_misplaced(vma, alignment, flags)) {
4763 WARN(vma->pin_count,
4764 "bo is already pinned in %s with incorrect alignment:"
4765 " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
4766 " obj->map_and_fenceable=%d\n",
4767 ggtt_view ? "ggtt" : "ppgtt",
4768 upper_32_bits(vma->node.start),
4769 lower_32_bits(vma->node.start),
4770 alignment,
4771 !!(flags & PIN_MAPPABLE),
4772 obj->map_and_fenceable);
4773 ret = i915_vma_unbind(vma);
4774 if (ret)
4775 return ret;
4776
4777 vma = NULL;
4778 }
4779 }
4780
4781 bound = vma ? vma->bound : 0;
4782 if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
4783 vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
4784 flags);
4785 if (IS_ERR(vma))
4786 return PTR_ERR(vma);
4787 } else {
4788 ret = i915_vma_bind(vma, obj->cache_level, flags);
4789 if (ret)
4790 return ret;
4791 }
4792
4793 if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
4794 (bound ^ vma->bound) & GLOBAL_BIND) {
4795 __i915_vma_set_map_and_fenceable(vma);
4796 WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
4797 }
4798
4799 vma->pin_count++;
4800 return 0;
4801 }
4802
4803 int
4804 i915_gem_object_pin(struct drm_i915_gem_object *obj,
4805 struct i915_address_space *vm,
4806 uint32_t alignment,
4807 uint64_t flags)
4808 {
4809 return i915_gem_object_do_pin(obj, vm,
4810 i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
4811 alignment, flags);
4812 }
4813
4814 int
4815 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
4816 const struct i915_ggtt_view *view,
4817 uint32_t alignment,
4818 uint64_t flags)
4819 {
4820 if (WARN_ONCE(!view, "no view specified"))
4821 return -EINVAL;
4822
4823 return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
4824 alignment, flags | PIN_GLOBAL);
4825 }
4826
4827 void
4828 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
4829 const struct i915_ggtt_view *view)
4830 {
4831 struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
4832
4833 BUG_ON(!vma);
4834 WARN_ON(vma->pin_count == 0);
4835 WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
4836
4837 --vma->pin_count;
4838 }
4839
4840 int
4841 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4842 struct drm_file *file)
4843 {
4844 struct drm_i915_gem_busy *args = data;
4845 struct drm_gem_object *gobj;
4846 struct drm_i915_gem_object *obj;
4847 int ret;
4848
4849 ret = i915_mutex_lock_interruptible(dev);
4850 if (ret)
4851 return ret;
4852
4853 gobj = drm_gem_object_lookup(dev, file, args->handle);
4854 if (gobj == NULL) {
4855 ret = -ENOENT;
4856 goto unlock;
4857 }
4858 obj = to_intel_bo(gobj);
4859
4860 /* Count all active objects as busy, even if they are currently not used
4861 	 * by the GPU. Users of this interface expect objects to eventually
4862 * become non-busy without any further actions, therefore emit any
4863 * necessary flushes here.
4864 */
4865 ret = i915_gem_object_flush_active(obj);
4866 if (ret)
4867 goto unref;
4868
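	/* Report the set of rings still reading the object in the upper
	 * 16 bits and the id of the last ring to write it in the low
	 * bits; the BUILD_BUG_ON checks that the active mask fits in
	 * those 16 bits. */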
4869 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4870 args->busy = obj->active << 16;
4871 if (obj->last_write_req)
4872 args->busy |= obj->last_write_req->ring->id;
4873
4874 unref:
4875 drm_gem_object_unreference(&obj->base);
4876 unlock:
4877 mutex_unlock(&dev->struct_mutex);
4878 return ret;
4879 }
4880
4881 int
4882 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4883 struct drm_file *file_priv)
4884 {
4885 return i915_gem_ring_throttle(dev, file_priv);
4886 }
4887
4888 int
4889 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4890 struct drm_file *file_priv)
4891 {
4892 struct drm_i915_private *dev_priv = dev->dev_private;
4893 struct drm_i915_gem_madvise *args = data;
4894 struct drm_gem_object *gobj;
4895 struct drm_i915_gem_object *obj;
4896 int ret;
4897
4898 switch (args->madv) {
4899 case I915_MADV_DONTNEED:
4900 case I915_MADV_WILLNEED:
4901 break;
4902 default:
4903 return -EINVAL;
4904 }
4905
4906 ret = i915_mutex_lock_interruptible(dev);
4907 if (ret)
4908 return ret;
4909
4910 gobj = drm_gem_object_lookup(dev, file_priv, args->handle);
4911 if (gobj == NULL) {
4912 ret = -ENOENT;
4913 goto unlock;
4914 }
4915 obj = to_intel_bo(gobj);
4916
4917 if (i915_gem_obj_is_pinned(obj)) {
4918 ret = -EINVAL;
4919 goto out;
4920 }
4921
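	/* On machines with the bit-17 swizzle quirk, tiled objects keep
	 * their pages pinned while WILLNEED so they cannot be swapped out
	 * and reswizzled; rebalance that extra pin when the madvise state
	 * changes. */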
4922 if (obj->pages &&
4923 obj->tiling_mode != I915_TILING_NONE &&
4924 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4925 if (obj->madv == I915_MADV_WILLNEED)
4926 i915_gem_object_unpin_pages(obj);
4927 if (args->madv == I915_MADV_WILLNEED)
4928 i915_gem_object_pin_pages(obj);
4929 }
4930
4931 if (obj->madv != __I915_MADV_PURGED)
4932 obj->madv = args->madv;
4933
4934 /* if the object is no longer attached, discard its backing storage */
4935 if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
4936 i915_gem_object_truncate(obj);
4937
4938 args->retained = obj->madv != __I915_MADV_PURGED;
4939
4940 out:
4941 drm_gem_object_unreference(&obj->base);
4942 unlock:
4943 mutex_unlock(&dev->struct_mutex);
4944 return ret;
4945 }
4946
4947 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4948 const struct drm_i915_gem_object_ops *ops)
4949 {
4950 int i;
4951
4952 INIT_LIST_HEAD(&obj->global_list);
4953 for (i = 0; i < I915_NUM_RINGS; i++)
4954 INIT_LIST_HEAD(&obj->ring_list[i]);
4955 INIT_LIST_HEAD(&obj->obj_exec_link);
4956 INIT_LIST_HEAD(&obj->vma_list);
4957 INIT_LIST_HEAD(&obj->batch_pool_link);
4958
4959 obj->ops = ops;
4960
4961 obj->fence_reg = I915_FENCE_REG_NONE;
4962 obj->madv = I915_MADV_WILLNEED;
4963
4964 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4965 }
4966
4967 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4968 .get_pages = i915_gem_object_get_pages_gtt,
4969 .put_pages = i915_gem_object_put_pages_gtt,
4970 };
4971
4972 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4973 size_t size)
4974 {
4975 #ifdef __NetBSD__
4976 struct drm_i915_private *const dev_priv = dev->dev_private;
4977 #endif
4978 struct drm_i915_gem_object *obj;
4979 #ifndef __NetBSD__
4980 struct address_space *mapping;
4981 gfp_t mask;
4982 #endif
4983
4984 obj = i915_gem_object_alloc(dev);
4985 if (obj == NULL)
4986 return NULL;
4987
4988 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4989 i915_gem_object_free(obj);
4990 return NULL;
4991 }
4992
4993 #ifdef __NetBSD__
4994 uao_set_pgfl(obj->base.filp, dev_priv->gtt.pgfl);
4995 #else
4996 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4997 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4998 /* 965gm cannot relocate objects above 4GiB. */
4999 mask &= ~__GFP_HIGHMEM;
5000 mask |= __GFP_DMA32;
5001 }
5002
5003 mapping = file_inode(obj->base.filp)->i_mapping;
5004 mapping_set_gfp_mask(mapping, mask);
5005 #endif
5006
5007 i915_gem_object_init(obj, &i915_gem_object_ops);
5008
5009 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
5010 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
5011
5012 if (HAS_LLC(dev)) {
5013 /* On some devices, we can have the GPU use the LLC (the CPU
5014 * cache) for about a 10% performance improvement
5015 * compared to uncached. Graphics requests other than
5016 * display scanout are coherent with the CPU in
5017 * accessing this cache. This means in this mode we
5018 * don't need to clflush on the CPU side, and on the
5019 * GPU side we only need to flush internal caches to
5020 * get data visible to the CPU.
5021 *
5022 * However, we maintain the display planes as UC, and so
5023 * need to rebind when first used as such.
5024 */
5025 obj->cache_level = I915_CACHE_LLC;
5026 } else
5027 obj->cache_level = I915_CACHE_NONE;
5028
5029 trace_i915_gem_object_create(obj);
5030
5031 return obj;
5032 }
5033
5034 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
5035 {
5036 /* If we are the last user of the backing storage (be it shmemfs
5037 * pages or stolen etc), we know that the pages are going to be
5038 * immediately released. In this case, we can then skip copying
5039 * back the contents from the GPU.
5040 */
5041
5042 if (obj->madv != I915_MADV_WILLNEED)
5043 return false;
5044
5045 if (obj->base.filp == NULL)
5046 return true;
5047
5048 /* At first glance, this looks racy, but then again so would be
5049 * userspace racing mmap against close. However, the first external
5050 * reference to the filp can only be obtained through the
5051 * i915_gem_mmap_ioctl() which safeguards us against the user
5052 * acquiring such a reference whilst we are in the middle of
5053 * freeing the object.
5054 */
5055 #ifdef __NetBSD__
5056 /* XXX This number might be a fencepost. */
5057 return obj->base.filp->uo_refs == 1;
5058 #else
5059 return atomic_long_read(&obj->base.filp->f_count) == 1;
5060 #endif
5061 }
5062
5063 void i915_gem_free_object(struct drm_gem_object *gem_obj)
5064 {
5065 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
5066 struct drm_device *dev = obj->base.dev;
5067 struct drm_i915_private *dev_priv = dev->dev_private;
5068 struct i915_vma *vma, *next;
5069
5070 intel_runtime_pm_get(dev_priv);
5071
5072 trace_i915_gem_object_destroy(obj);
5073
5074 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
5075 int ret;
5076
5077 vma->pin_count = 0;
5078 ret = i915_vma_unbind(vma);
5079 if (WARN_ON(ret == -ERESTARTSYS)) {
5080 bool was_interruptible;
5081
5082 was_interruptible = dev_priv->mm.interruptible;
5083 dev_priv->mm.interruptible = false;
5084
5085 WARN_ON(i915_vma_unbind(vma));
5086
5087 dev_priv->mm.interruptible = was_interruptible;
5088 }
5089 }
5090
5091 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
5092 * before progressing. */
5093 if (obj->stolen)
5094 i915_gem_object_unpin_pages(obj);
5095
5096 WARN_ON(obj->frontbuffer_bits);
5097
5098 if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
5099 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
5100 obj->tiling_mode != I915_TILING_NONE)
5101 i915_gem_object_unpin_pages(obj);
5102
5103 if (WARN_ON(obj->pages_pin_count))
5104 obj->pages_pin_count = 0;
5105 if (discard_backing_storage(obj))
5106 obj->madv = I915_MADV_DONTNEED;
5107 i915_gem_object_put_pages(obj);
5108 i915_gem_object_free_mmap_offset(obj);
5109
5110 BUG_ON(obj->pages);
5111
5112 if (obj->base.import_attach)
5113 drm_prime_gem_destroy(&obj->base, NULL);
5114
5115 if (obj->ops->release)
5116 obj->ops->release(obj);
5117
5118 drm_gem_object_release(&obj->base);
5119 i915_gem_info_remove_obj(dev_priv, obj->base.size);
5120
5121 kfree(obj->bit_17);
5122 i915_gem_object_free(obj);
5123
5124 intel_runtime_pm_put(dev_priv);
5125 }
5126
5127 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
5128 struct i915_address_space *vm)
5129 {
5130 struct i915_vma *vma;
5131 list_for_each_entry(vma, &obj->vma_list, vma_link) {
5132 if (i915_is_ggtt(vma->vm) &&
5133 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5134 continue;
5135 if (vma->vm == vm)
5136 return vma;
5137 }
5138 return NULL;
5139 }
5140
5141 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
5142 const struct i915_ggtt_view *view)
5143 {
5144 struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
5145 struct i915_vma *vma;
5146
5147 if (WARN_ONCE(!view, "no view specified"))
5148 return ERR_PTR(-EINVAL);
5149
5150 list_for_each_entry(vma, &obj->vma_list, vma_link)
5151 if (vma->vm == ggtt &&
5152 i915_ggtt_view_equal(&vma->ggtt_view, view))
5153 return vma;
5154 return NULL;
5155 }
5156
5157 void i915_gem_vma_destroy(struct i915_vma *vma)
5158 {
5159 struct i915_address_space *vm = NULL;
5160 WARN_ON(vma->node.allocated);
5161
5162 /* Keep the vma as a placeholder in the execbuffer reservation lists */
5163 if (!list_empty(&vma->exec_list))
5164 return;
5165
5166 vm = vma->vm;
5167
5168 if (!i915_is_ggtt(vm))
5169 i915_ppgtt_put(i915_vm_to_ppgtt(vm));
5170
5171 list_del(&vma->vma_link);
5172
5173 kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
5174 }
5175
5176 static void
5177 i915_gem_stop_ringbuffers(struct drm_device *dev)
5178 {
5179 struct drm_i915_private *dev_priv = dev->dev_private;
5180 struct intel_engine_cs *ring;
5181 int i;
5182
5183 for_each_ring(ring, dev_priv, i)
5184 dev_priv->gt.stop_ring(ring);
5185 }
5186
5187 int
5188 i915_gem_suspend(struct drm_device *dev)
5189 {
5190 struct drm_i915_private *dev_priv = dev->dev_private;
5191 int ret = 0;
5192
5193 mutex_lock(&dev->struct_mutex);
5194 ret = i915_gpu_idle(dev);
5195 if (ret)
5196 goto err;
5197
5198 i915_gem_retire_requests(dev);
5199
5200 i915_gem_stop_ringbuffers(dev);
5201 mutex_unlock(&dev->struct_mutex);
5202
5203 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
5204 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
5205 flush_delayed_work(&dev_priv->mm.idle_work);
5206
5207 	/* Assert that we successfully flushed all the work and
5208 * reset the GPU back to its idle, low power state.
5209 */
5210 WARN_ON(dev_priv->mm.busy);
5211
5212 return 0;
5213
5214 err:
5215 mutex_unlock(&dev->struct_mutex);
5216 return ret;
5217 }
5218
5219 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
5220 {
5221 struct intel_engine_cs *ring = req->ring;
5222 struct drm_device *dev = ring->dev;
5223 struct drm_i915_private *dev_priv = dev->dev_private;
5224 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
5225 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
5226 int i, ret;
5227
5228 if (!HAS_L3_DPF(dev) || !remap_info)
5229 return 0;
5230
5231 ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
5232 if (ret)
5233 return ret;
5234
5235 /*
5236 * Note: We do not worry about the concurrent register cacheline hang
5237 * here because no other code should access these registers other than
5238 * at initialization time.
5239 */
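	/* Each 4-byte remap entry costs three dwords on the ring (LRI
	 * header, register offset, value), hence the GEN7_L3LOG_SIZE / 4 * 3
	 * reservation above. */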
5240 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
5241 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
5242 intel_ring_emit(ring, reg_base + i);
5243 intel_ring_emit(ring, remap_info[i/4]);
5244 }
5245
5246 intel_ring_advance(ring);
5247
5248 return ret;
5249 }
5250
5251 void i915_gem_init_swizzling(struct drm_device *dev)
5252 {
5253 struct drm_i915_private *dev_priv = dev->dev_private;
5254
5255 if (INTEL_INFO(dev)->gen < 5 ||
5256 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
5257 return;
5258
5259 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
5260 DISP_TILE_SURFACE_SWIZZLING);
5261
5262 if (IS_GEN5(dev))
5263 return;
5264
5265 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
5266 if (IS_GEN6(dev))
5267 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
5268 else if (IS_GEN7(dev))
5269 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
5270 else if (IS_GEN8(dev))
5271 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
5272 else
5273 BUG();
5274 }
5275
5276 static void init_unused_ring(struct drm_device *dev, u32 base)
5277 {
5278 struct drm_i915_private *dev_priv = dev->dev_private;
5279
5280 I915_WRITE(RING_CTL(base), 0);
5281 I915_WRITE(RING_HEAD(base), 0);
5282 I915_WRITE(RING_TAIL(base), 0);
5283 I915_WRITE(RING_START(base), 0);
5284 }
5285
5286 static void init_unused_rings(struct drm_device *dev)
5287 {
5288 if (IS_I830(dev)) {
5289 init_unused_ring(dev, PRB1_BASE);
5290 init_unused_ring(dev, SRB0_BASE);
5291 init_unused_ring(dev, SRB1_BASE);
5292 init_unused_ring(dev, SRB2_BASE);
5293 init_unused_ring(dev, SRB3_BASE);
5294 } else if (IS_GEN2(dev)) {
5295 init_unused_ring(dev, SRB0_BASE);
5296 init_unused_ring(dev, SRB1_BASE);
5297 } else if (IS_GEN3(dev)) {
5298 init_unused_ring(dev, PRB1_BASE);
5299 init_unused_ring(dev, PRB2_BASE);
5300 }
5301 }
5302
5303 int i915_gem_init_rings(struct drm_device *dev)
5304 {
5305 struct drm_i915_private *dev_priv = dev->dev_private;
5306 int ret;
5307
5308 ret = intel_init_render_ring_buffer(dev);
5309 if (ret)
5310 return ret;
5311
5312 if (HAS_BSD(dev)) {
5313 ret = intel_init_bsd_ring_buffer(dev);
5314 if (ret)
5315 goto cleanup_render_ring;
5316 }
5317
5318 if (HAS_BLT(dev)) {
5319 ret = intel_init_blt_ring_buffer(dev);
5320 if (ret)
5321 goto cleanup_bsd_ring;
5322 }
5323
5324 if (HAS_VEBOX(dev)) {
5325 ret = intel_init_vebox_ring_buffer(dev);
5326 if (ret)
5327 goto cleanup_blt_ring;
5328 }
5329
5330 if (HAS_BSD2(dev)) {
5331 ret = intel_init_bsd2_ring_buffer(dev);
5332 if (ret)
5333 goto cleanup_vebox_ring;
5334 }
5335
5336 return 0;
5337
5338 cleanup_vebox_ring:
5339 intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
5340 cleanup_blt_ring:
5341 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
5342 cleanup_bsd_ring:
5343 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
5344 cleanup_render_ring:
5345 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
5346
5347 return ret;
5348 }
5349
5350 int
5351 i915_gem_init_hw(struct drm_device *dev)
5352 {
5353 struct drm_i915_private *dev_priv = dev->dev_private;
5354 struct intel_engine_cs *ring;
5355 int ret, i, j;
5356
5357 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
5358 return -EIO;
5359
5360 /* Double layer security blanket, see i915_gem_init() */
5361 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5362
5363 if (dev_priv->ellc_size)
5364 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
5365
5366 if (IS_HASWELL(dev))
5367 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
5368 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
5369
5370 if (HAS_PCH_NOP(dev)) {
5371 if (IS_IVYBRIDGE(dev)) {
5372 u32 temp = I915_READ(GEN7_MSG_CTL);
5373 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
5374 I915_WRITE(GEN7_MSG_CTL, temp);
5375 } else if (INTEL_INFO(dev)->gen >= 7) {
5376 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
5377 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
5378 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
5379 }
5380 }
5381
5382 i915_gem_init_swizzling(dev);
5383
5384 /*
5385 * At least 830 can leave some of the unused rings
5386 	 * "active" (i.e. head != tail) after resume, which
5387 	 * will prevent C3 entry. Make sure all unused rings
5388 	 * are totally idle.
5389 */
5390 init_unused_rings(dev);
5391
5392 BUG_ON(!dev_priv->ring[RCS].default_context);
5393
5394 ret = i915_ppgtt_init_hw(dev);
5395 if (ret) {
5396 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
5397 goto out;
5398 }
5399
5400 /* Need to do basic initialisation of all rings first: */
5401 for_each_ring(ring, dev_priv, i) {
5402 ret = ring->init_hw(ring);
5403 if (ret)
5404 goto out;
5405 }
5406
5407 /* We can't enable contexts until all firmware is loaded */
5408 if (HAS_GUC_UCODE(dev)) {
5409 ret = intel_guc_ucode_load(dev);
5410 if (ret) {
5411 /*
5412 * If we got an error and GuC submission is enabled, map
5413 * the error to -EIO so the GPU will be declared wedged.
5414 * OTOH, if we didn't intend to use the GuC anyway, just
5415 * discard the error and carry on.
5416 */
5417 DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret,
5418 i915.enable_guc_submission ? "" :
5419 " (ignored)");
5420 ret = i915.enable_guc_submission ? -EIO : 0;
5421 if (ret)
5422 goto out;
5423 }
5424 }
5425
5426 /*
5427 * Increment the next seqno by 0x100 so we have a visible break
5428 * on re-initialisation
5429 */
5430 ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
5431 if (ret)
5432 goto out;
5433
5434 /* Now it is safe to go back round and do everything else: */
5435 for_each_ring(ring, dev_priv, i) {
5436 struct drm_i915_gem_request *req;
5437
5438 WARN_ON(!ring->default_context);
5439
5440 ret = i915_gem_request_alloc(ring, ring->default_context, &req);
5441 if (ret) {
5442 i915_gem_cleanup_ringbuffer(dev);
5443 goto out;
5444 }
5445
5446 if (ring->id == RCS) {
5447 for (j = 0; j < NUM_L3_SLICES(dev); j++)
5448 i915_gem_l3_remap(req, j);
5449 }
5450
5451 ret = i915_ppgtt_init_ring(req);
5452 if (ret && ret != -EIO) {
5453 DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
5454 i915_gem_request_cancel(req);
5455 i915_gem_cleanup_ringbuffer(dev);
5456 goto out;
5457 }
5458
5459 ret = i915_gem_context_enable(req);
5460 if (ret && ret != -EIO) {
5461 DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
5462 i915_gem_request_cancel(req);
5463 i915_gem_cleanup_ringbuffer(dev);
5464 goto out;
5465 }
5466
5467 i915_add_request_no_flush(req);
5468 }
5469
5470 out:
5471 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5472 return ret;
5473 }
5474
5475 int i915_gem_init(struct drm_device *dev)
5476 {
5477 struct drm_i915_private *dev_priv = dev->dev_private;
5478 int ret;
5479
5480 i915.enable_execlists = intel_sanitize_enable_execlists(dev,
5481 i915.enable_execlists);
5482
5483 idr_preload(GFP_KERNEL); /* gem context */
5484 mutex_lock(&dev->struct_mutex);
5485
5486 if (IS_VALLEYVIEW(dev)) {
5487 /* VLVA0 (potential hack), BIOS isn't actually waking us */
5488 I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
5489 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
5490 VLV_GTLC_ALLOWWAKEACK), 10))
5491 DRM_DEBUG_DRIVER("allow wake ack timed out\n");
5492 }
5493
5494 if (!i915.enable_execlists) {
5495 dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
5496 dev_priv->gt.init_rings = i915_gem_init_rings;
5497 dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
5498 dev_priv->gt.stop_ring = intel_stop_ring_buffer;
5499 } else {
5500 dev_priv->gt.execbuf_submit = intel_execlists_submission;
5501 dev_priv->gt.init_rings = intel_logical_rings_init;
5502 dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
5503 dev_priv->gt.stop_ring = intel_logical_ring_stop;
5504 }
5505
5506 /* This is just a security blanket to placate dragons.
5507 * On some systems, we very sporadically observe that the first TLBs
5508 * used by the CS may be stale, despite us poking the TLB reset. If
5509 * we hold the forcewake during initialisation these problems
5510 * just magically go away.
5511 */
5512 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5513
5514 ret = i915_gem_init_userptr(dev);
5515 if (ret)
5516 goto out_unlock;
5517
5518 i915_gem_init_global_gtt(dev);
5519
5520 ret = i915_gem_context_init(dev);
5521 if (ret)
5522 goto out_unlock;
5523
5524 ret = dev_priv->gt.init_rings(dev);
5525 if (ret)
5526 goto out_unlock;
5527
5528 ret = i915_gem_init_hw(dev);
5529 if (ret == -EIO) {
5530 /* Allow ring initialisation to fail by marking the GPU as
5531 * wedged. But we only want to do this where the GPU is angry,
5532 		 * for all other failures, such as an allocation failure, bail.
5533 */
5534 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
5535 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
5536 ret = 0;
5537 }
5538
5539 out_unlock:
5540 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5541 mutex_unlock(&dev->struct_mutex);
5542 idr_preload_end();
5543
5544 return ret;
5545 }
5546
5547 void
5548 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
5549 {
5550 struct drm_i915_private *dev_priv = dev->dev_private;
5551 struct intel_engine_cs *ring;
5552 int i;
5553
5554 for_each_ring(ring, dev_priv, i)
5555 dev_priv->gt.cleanup_ring(ring);
5556
5557 if (i915.enable_execlists)
5558 /*
5559 		 * Neither the BIOS, ourselves, nor any other kernel
5560 * expects the system to be in execlists mode on startup,
5561 * so we need to reset the GPU back to legacy mode.
5562 */
5563 intel_gpu_reset(dev);
5564 }
5565
5566 static void
5567 init_ring_lists(struct intel_engine_cs *ring)
5568 {
5569 INIT_LIST_HEAD(&ring->active_list);
5570 INIT_LIST_HEAD(&ring->request_list);
5571 }
5572
5573 void
5574 i915_gem_load(struct drm_device *dev)
5575 {
5576 struct drm_i915_private *dev_priv = dev->dev_private;
5577 int i;
5578
5579 dev_priv->objects =
5580 kmem_cache_create("i915_gem_object",
5581 sizeof(struct drm_i915_gem_object), 0,
5582 SLAB_HWCACHE_ALIGN,
5583 NULL);
5584 dev_priv->vmas =
5585 kmem_cache_create("i915_gem_vma",
5586 sizeof(struct i915_vma), 0,
5587 SLAB_HWCACHE_ALIGN,
5588 NULL);
5589 dev_priv->requests =
5590 kmem_cache_create("i915_gem_request",
5591 sizeof(struct drm_i915_gem_request), 0,
5592 SLAB_HWCACHE_ALIGN,
5593 NULL);
5594
5595 INIT_LIST_HEAD(&dev_priv->vm_list);
5596 INIT_LIST_HEAD(&dev_priv->context_list);
5597 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
5598 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
5599 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
5600 for (i = 0; i < I915_NUM_RINGS; i++)
5601 init_ring_lists(&dev_priv->ring[i]);
5602 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
5603 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
5604 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
5605 i915_gem_retire_work_handler);
5606 INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
5607 i915_gem_idle_work_handler);
5608 #ifdef __NetBSD__
5609 spin_lock_init(&dev_priv->gpu_error.reset_lock);
5610 DRM_INIT_WAITQUEUE(&dev_priv->gpu_error.reset_queue, "i915errst");
5611 #else
5612 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
5613 #endif
5614
5615 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
5616
5617 if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
5618 dev_priv->num_fence_regs = 32;
5619 else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
5620 dev_priv->num_fence_regs = 16;
5621 else
5622 dev_priv->num_fence_regs = 8;
5623
5624 if (intel_vgpu_active(dev))
5625 dev_priv->num_fence_regs =
5626 I915_READ(vgtif_reg(avail_rs.fence_num));
5627
5628 /*
5629 * Set initial sequence number for requests.
5630 * Using this number allows the wraparound to happen early,
5631 * catching any obvious problems.
5632 */
5633 dev_priv->next_seqno = ((u32)~0 - 0x1100);
5634 dev_priv->last_seqno = ((u32)~0 - 0x1101);
5635
5636 /* Initialize fence registers to zero */
5638 i915_gem_restore_fences(dev);
5639
5640 i915_gem_detect_bit_6_swizzle(dev);
5641 #ifdef __NetBSD__
5642 DRM_INIT_WAITQUEUE(&dev_priv->pending_flip_queue, "i915flip");
5643 spin_lock_init(&dev_priv->pending_flip_lock);
5644 #else
5645 init_waitqueue_head(&dev_priv->pending_flip_queue);
5646 #endif
5647
5648 dev_priv->mm.interruptible = true;
5649
5650 i915_gem_shrinker_init(dev_priv);
5651 #ifdef __NetBSD__
5652 linux_mutex_init(&dev_priv->fb_tracking.lock);
5653 #else
5654 mutex_init(&dev_priv->fb_tracking.lock);
5655 #endif
5656 }
5657
5658 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5659 {
5660 struct drm_i915_file_private *file_priv = file->driver_priv;
5661
5662 /* Clean up our request list when the client is going away, so that
5663 * later retire_requests won't dereference our soon-to-be-gone
5664 * file_priv.
5665 */
5666 spin_lock(&file_priv->mm.lock);
5667 while (!list_empty(&file_priv->mm.request_list)) {
5668 struct drm_i915_gem_request *request;
5669
5670 request = list_first_entry(&file_priv->mm.request_list,
5671 struct drm_i915_gem_request,
5672 client_list);
5673 list_del(&request->client_list);
5674 request->file_priv = NULL;
5675 }
5676 spin_unlock(&file_priv->mm.lock);
5677
5678 if (!list_empty(&file_priv->rps.link)) {
5679 spin_lock(&to_i915(dev)->rps.client_lock);
5680 list_del(&file_priv->rps.link);
5681 spin_unlock(&to_i915(dev)->rps.client_lock);
5682 }
5683 }
5684
5685 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
5686 {
5687 struct drm_i915_file_private *file_priv;
5688 int ret;
5689
5690 DRM_DEBUG_DRIVER("\n");
5691
5692 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5693 if (!file_priv)
5694 return -ENOMEM;
5695
5696 file->driver_priv = file_priv;
5697 file_priv->dev_priv = dev->dev_private;
5698 file_priv->file = file;
5699 INIT_LIST_HEAD(&file_priv->rps.link);
5700
5701 spin_lock_init(&file_priv->mm.lock);
5702 INIT_LIST_HEAD(&file_priv->mm.request_list);
5703
5704 ret = i915_gem_context_open(dev, file);
5705 if (ret)
5706 kfree(file_priv);
5707
5708 return ret;
5709 }
5710
5711 /**
5712 * i915_gem_track_fb - update frontbuffer tracking
5713 * @old: current GEM buffer for the frontbuffer slots
5714 * @new: new GEM buffer for the frontbuffer slots
5715 * @frontbuffer_bits: bitmask of frontbuffer slots
5716 *
5717 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
5718 * from @old and setting them in @new. Both @old and @new can be NULL.
5719 */
5720 void i915_gem_track_fb(struct drm_i915_gem_object *old,
5721 struct drm_i915_gem_object *new,
5722 unsigned frontbuffer_bits)
5723 {
5724 if (old) {
5725 WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
5726 WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
5727 old->frontbuffer_bits &= ~frontbuffer_bits;
5728 }
5729
5730 if (new) {
5731 WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
5732 WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
5733 new->frontbuffer_bits |= frontbuffer_bits;
5734 }
5735 }
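/*
 * Example (sketch; old_fb_obj/new_fb_obj are illustrative names): on a
 * page flip the new framebuffer object inherits the plane's frontbuffer
 * bit from the old one:
 *
 *	i915_gem_track_fb(old_fb_obj, new_fb_obj,
 *			  INTEL_FRONTBUFFER_PRIMARY(pipe));
 */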
5736
5737 /* All the new VM stuff */
5738 u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
5739 struct i915_address_space *vm)
5740 {
5741 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5742 struct i915_vma *vma;
5743
5744 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5745
5746 list_for_each_entry(vma, &o->vma_list, vma_link) {
5747 if (i915_is_ggtt(vma->vm) &&
5748 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5749 continue;
5750 if (vma->vm == vm)
5751 return vma->node.start;
5752 }
5753
5754 WARN(1, "%s vma for this object not found.\n",
5755 i915_is_ggtt(vm) ? "global" : "ppgtt");
5756 return -1;
5757 }
5758
5759 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
5760 const struct i915_ggtt_view *view)
5761 {
5762 struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
5763 struct i915_vma *vma;
5764
5765 list_for_each_entry(vma, &o->vma_list, vma_link)
5766 if (vma->vm == ggtt &&
5767 i915_ggtt_view_equal(&vma->ggtt_view, view))
5768 return vma->node.start;
5769
5770 WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
5771 return -1;
5772 }
5773
5774 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5775 struct i915_address_space *vm)
5776 {
5777 struct i915_vma *vma;
5778
5779 list_for_each_entry(vma, &o->vma_list, vma_link) {
5780 if (i915_is_ggtt(vma->vm) &&
5781 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5782 continue;
5783 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
5784 return true;
5785 }
5786
5787 return false;
5788 }
5789
5790 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
5791 const struct i915_ggtt_view *view)
5792 {
5793 struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
5794 struct i915_vma *vma;
5795
5796 list_for_each_entry(vma, &o->vma_list, vma_link)
5797 if (vma->vm == ggtt &&
5798 i915_ggtt_view_equal(&vma->ggtt_view, view) &&
5799 drm_mm_node_allocated(&vma->node))
5800 return true;
5801
5802 return false;
5803 }
5804
5805 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
5806 {
5807 struct i915_vma *vma;
5808
5809 list_for_each_entry(vma, &o->vma_list, vma_link)
5810 if (drm_mm_node_allocated(&vma->node))
5811 return true;
5812
5813 return false;
5814 }
5815
5816 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5817 struct i915_address_space *vm)
5818 {
5819 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5820 struct i915_vma *vma;
5821
5822 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5823
5824 BUG_ON(list_empty(&o->vma_list));
5825
5826 list_for_each_entry(vma, &o->vma_list, vma_link) {
5827 if (i915_is_ggtt(vma->vm) &&
5828 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
5829 continue;
5830 if (vma->vm == vm)
5831 return vma->node.size;
5832 }
5833 return 0;
5834 }
5835
5836 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
5837 {
5838 struct i915_vma *vma;
5839 list_for_each_entry(vma, &obj->vma_list, vma_link)
5840 if (vma->pin_count > 0)
5841 return true;
5842
5843 return false;
5844 }
5845
5846 /* Allocate a new GEM object and fill it with the supplied data */
5847 struct drm_i915_gem_object *
5848 i915_gem_object_create_from_data(struct drm_device *dev,
5849 const void *data, size_t size)
5850 {
5851 struct drm_i915_gem_object *obj;
5852 #ifdef __NetBSD__
5853 struct iovec iov = { .iov_base = __UNCONST(data), .iov_len = size };
5854 struct uio uio = {
5855 .uio_iov = &iov,
5856 .uio_iovcnt = 1,
5857 .uio_offset = 0,
5858 .uio_resid = size,
5859 .uio_rw = UIO_WRITE,
5860 };
5861 #else
5862 struct sg_table *sg;
5863 #endif
5864 size_t bytes;
5865 int ret;
5866
5867 obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
5868 if (IS_ERR_OR_NULL(obj))
5869 return obj;
5870
5871 ret = i915_gem_object_set_to_cpu_domain(obj, true);
5872 if (ret)
5873 goto fail;
5874
5875 ret = i915_gem_object_get_pages(obj);
5876 if (ret)
5877 goto fail;
5878
5879 i915_gem_object_pin_pages(obj);
5880 #ifdef __NetBSD__
5881 UIO_SETUP_SYSSPACE(&uio);
5882 /* XXX errno NetBSD->Linux */
5883 ret = -ubc_uiomove(obj->base.filp, &uio, size, UVM_ADV_NORMAL,
5884 UBC_WRITE);
5885 if (ret)
5886 goto fail;
5887 bytes = size - uio.uio_resid;
5888 #else
5889 sg = obj->pages;
5890 bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
5891 #endif
5892 i915_gem_object_unpin_pages(obj);
5893
5894 if (WARN_ON(bytes != size)) {
5895 DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
5896 ret = -EFAULT;
5897 goto fail;
5898 }
5899
5900 return obj;
5901
5902 fail:
5903 drm_gem_object_unreference(&obj->base);
5904 return ERR_PTR(ret);
5905 }
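/*
 * Usage sketch ("blob" and "len" are illustrative): upload a
 * firmware-style blob into a fresh GEM object:
 *
 *	obj = i915_gem_object_create_from_data(dev, blob, len);
 *	if (IS_ERR_OR_NULL(obj))
 *		... allocation or copy failed ...
 */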
5906