/*	$NetBSD: i915_gem_internal.c,v 1.4 2021/12/19 11:33:30 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_gem_internal.c,v 1.4 2021/12/19 11:33:30 riastradh Exp $");

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"

#ifndef __NetBSD__
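/*
 * QUIET suppresses both retries and failure warnings for the
 * opportunistic high-order allocations; MAYFAIL is used only for the
 * final order-0 attempt, where failure should be reported to the
 * caller rather than invoking the OOM killer.
 */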
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)

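/*
 * Free every page block referenced by the table, then the table
 * itself.  Each sg entry holds one physically contiguous block whose
 * allocation order is recovered from its length.
 */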
static void internal_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}
#endif

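/*
 * Allocate backing storage for an internal object.  The NetBSD path
 * grabs DMA-safe memory in a single bus_dmamem_alloc() call and wraps
 * it in an sg_table; the Linux path builds the table from high-order
 * page allocations, falling back to smaller orders as memory
 * fragments.
 */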
static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
#ifdef __NetBSD__
	bus_dma_tag_t dmat = i915->drm.dmat;
	struct sg_table *sgt = NULL;
	size_t nsegs;
	bool alloced = false, prepared = false;
	int ret;

	obj->mm.u.internal.rsegs = obj->mm.u.internal.nsegs = 0;

	KASSERT(obj->mm.u.internal.segs == NULL);
	nsegs = obj->base.size >> PAGE_SHIFT;
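	/*
	 * Reject sizes whose segment count would overflow either the
	 * int taken by bus_dmamem_alloc() or the array size computed
	 * below.
	 */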
	if (nsegs > INT_MAX ||
	    nsegs > SIZE_MAX/sizeof(obj->mm.u.internal.segs[0])) {
		ret = -ENOMEM;
		goto out;
	}
	obj->mm.u.internal.segs = kmem_alloc(
	    nsegs * sizeof(obj->mm.u.internal.segs[0]),
	    KM_NOSLEEP);
	if (obj->mm.u.internal.segs == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	obj->mm.u.internal.nsegs = nsegs;

	/* XXX errno NetBSD->Linux */
	ret = -bus_dmamem_alloc(dmat, obj->base.size, PAGE_SIZE, 0,
	    obj->mm.u.internal.segs, nsegs, &obj->mm.u.internal.rsegs,
	    BUS_DMA_NOWAIT);
	if (ret)
		goto out;

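	/*
	 * Wrap the raw DMA segments in an sg_table so the shared i915
	 * code can consume them like any other page list.
	 */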
	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (sgt == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	if (sg_alloc_table_from_bus_dmamem(sgt, dmat, obj->mm.u.internal.segs,
		obj->mm.u.internal.rsegs, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out;
	}
	alloced = true;

	ret = i915_gem_gtt_prepare_pages(obj, sgt);
	if (ret)
		goto out;
	prepared = true;

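	/*
	 * Internal objects are volatile: marking them DONTNEED lets
	 * the shrinker discard the pages as soon as they are unpinned.
	 */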
	obj->mm.madv = I915_MADV_DONTNEED;
	__i915_gem_object_set_pages(obj, sgt, i915_sg_page_sizes(sgt->sgl));

	return 0;

out:	if (ret) {
		if (prepared)
			i915_gem_gtt_finish_pages(obj, sgt);
		if (alloced)
			sg_free_table(sgt);
		if (sgt) {
			kfree(sgt);
			sgt = NULL;
		}
		if (obj->mm.u.internal.rsegs) {
			bus_dmamem_free(dmat, obj->mm.u.internal.segs,
			    obj->mm.u.internal.rsegs);
			obj->mm.u.internal.rsegs = 0;
		}
		if (obj->mm.u.internal.nsegs) {
			kmem_free(obj->mm.u.internal.segs,
			    (obj->mm.u.internal.nsegs *
				sizeof(obj->mm.u.internal.segs[0])));
			obj->mm.u.internal.nsegs = 0;
			obj->mm.u.internal.segs = NULL;
		}
	}
	return ret;
#else
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	unsigned int npages;
	int max_order;
	gfp_t gfp;

	max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
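	/*
	 * With swiotlb active, DMA may be bounced through buffers of
	 * at most swiotlb_max_segment() bytes; clamp the allocation
	 * order so every sg segment fits in a single bounce buffer.
	 */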
	if (swiotlb_nr_tbl()) {
		unsigned int max_segment;

		max_segment = swiotlb_max_segment();
		if (max_segment) {
			max_segment = max_t(unsigned int, max_segment,
					    PAGE_SIZE) >> PAGE_SHIFT;
			max_order = min(max_order, ilog2(max_segment));
		}
	}
#endif

	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		gfp &= ~__GFP_HIGHMEM;
		gfp |= __GFP_DMA32;
	}

create_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	npages = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;

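	/*
	 * Fill the table with the largest page blocks available,
	 * halving the order on each failure.  max_order ratchets down
	 * so an order that failed once is never attempted again.
	 */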
	do {
		int order = min(fls(npages) - 1, max_order);
		struct page *page;

		do {
			page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
					   order);
			if (page)
				break;
			if (!order--)
				goto err;

			/* Limit subsequent allocations as well */
			max_order = order;
		} while (1);

		sg_set_page(sg, page, PAGE_SIZE << order, 0);
		sg_page_sizes |= PAGE_SIZE << order;
		st->nents++;

		npages -= 1 << order;
		if (!npages) {
			sg_mark_end(sg);
			break;
		}

		sg = __sg_next(sg);
	} while (1);

	if (i915_gem_gtt_prepare_pages(obj, st)) {
		/* Failed to dma-map; try again with single-page sg segments. */
		if (get_order(st->sgl->length)) {
			internal_free_pages(st);
			max_order = 0;
			goto create_st;
		}
		goto err;
	}

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
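	/*
	 * Clear the entry we failed to fill and terminate the table
	 * there so internal_free_pages() frees only what was allocated.
	 */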
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	internal_free_pages(st);

	return -ENOMEM;
#endif
}

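/*
 * Release the backing pages: undo the GTT mapping first, then return
 * the memory through whichever path allocated it.
 */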
static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
					       struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
#ifdef __NetBSD__
	sg_free_table(pages);
	kfree(pages);
	bus_dmamem_free(obj->base.dev->dmat, obj->mm.u.internal.segs,
	    obj->mm.u.internal.rsegs);
	obj->mm.u.internal.rsegs = 0;
	kmem_free(obj->mm.u.internal.segs,
	    obj->mm.u.internal.nsegs * sizeof(obj->mm.u.internal.segs[0]));
	obj->mm.u.internal.nsegs = 0;
	obj->mm.u.internal.segs = NULL;
#else
	internal_free_pages(pages);
#endif

	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_object_get_pages_internal,
	.put_pages = i915_gem_object_put_pages_internal,
};

/**
 * i915_gem_object_create_internal: create an object with volatile pages
 * @i915: the i915 device
 * @size: the size in bytes of backing storage to allocate for the object
 *
 * Creates a new object that wraps some internal memory for private use.
 * This object is not backed by swappable storage, and as such its contents
 * are volatile and only valid whilst pinned. If the object is reaped by the
 * shrinker, its pages and data will be discarded. Equally, it is not a full
 * GEM object and so not valid for access from userspace. This makes it useful
 * for hardware interfaces like ringbuffers (which are pinned from the time
 * the request is written to the time the hardware stops accessing it), but
 * not for contexts (which need to be preserved when not active for later
 * reuse). Note that it is not cleared upon allocation.
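 *
 * A minimal usage sketch (hypothetical caller; error handling beyond
 * the allocation itself is elided):
 *
 *	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);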
 */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
				phys_addr_t size)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class);

	/*
	 * Mark the object as volatile, such that the pages are marked as
	 * dontneed whilst they are still pinned. As soon as they are unpinned
	 * they are allowed to be reaped by the shrinker, and the caller is
	 * expected to repopulate - the contents of this object are only valid
	 * whilst active and pinned.
	 */
	i915_gem_object_set_volatile(obj);

	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;

	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	return obj;
}
    299