/*	$NetBSD: scatterlist.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $	*/

/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: scatterlist.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $");

#include <linux/prime_numbers.h>
#include <linux/random.h>

#include "i915_selftest.h"
#include "i915_utils.h"

#define PFN_BIAS (1 << 10)

struct pfn_table {
	struct sg_table st;
	unsigned long start, end;
};

typedef unsigned int (*npages_fn_t)(unsigned long n,
				     unsigned long count,
				     struct rnd_state *rnd);

static noinline int expect_pfn_sg(struct pfn_table *pt,
				  npages_fn_t npages_fn,
				  struct rnd_state *rnd,
				  const char *who,
				  unsigned long timeout)
{
	struct scatterlist *sg;
	unsigned long pfn, n;

	pfn = pt->start;
	for_each_sg(pt->st.sgl, sg, pt->st.nents, n) {
		struct page *page = sg_page(sg);
		unsigned int npages = npages_fn(n, pt->st.nents, rnd);

		if (page_to_pfn(page) != pfn) {
			pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg)\n",
			       __func__, who, pfn, page_to_pfn(page));
			return -EINVAL;
		}

		if (sg->length != npages * PAGE_SIZE) {
			pr_err("%s: %s copied wrong sg length, expected size %lu, found %u (using for_each_sg)\n",
			       __func__, who, npages * PAGE_SIZE, sg->length);
			return -EINVAL;
		}

		if (igt_timeout(timeout, "%s timed out\n", who))
			return -EINTR;

		pfn += npages;
	}
	if (pfn != pt->end) {
		pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
		       __func__, who, pt->end, pfn);
		return -EINVAL;
	}

	return 0;
}

static noinline int expect_pfn_sg_page_iter(struct pfn_table *pt,
					    const char *who,
					    unsigned long timeout)
{
	struct sg_page_iter sgiter;
	unsigned long pfn;

	pfn = pt->start;
	for_each_sg_page(pt->st.sgl, &sgiter, pt->st.nents, 0) {
		struct page *page = sg_page_iter_page(&sgiter);

		if (page != pfn_to_page(pfn)) {
			pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg_page)\n",
			       __func__, who, pfn, page_to_pfn(page));
			return -EINVAL;
		}

		if (igt_timeout(timeout, "%s timed out\n", who))
			return -EINTR;

		pfn++;
	}
	if (pfn != pt->end) {
		pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
		       __func__, who, pt->end, pfn);
		return -EINVAL;
	}

	return 0;
}

static noinline int expect_pfn_sgtiter(struct pfn_table *pt,
				       const char *who,
				       unsigned long timeout)
{
	struct sgt_iter sgt;
	struct page *page;
	unsigned long pfn;

	pfn = pt->start;
	for_each_sgt_page(page, sgt, &pt->st) {
		if (page != pfn_to_page(pfn)) {
			pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sgt_page)\n",
			       __func__, who, pfn, page_to_pfn(page));
			return -EINVAL;
		}

		if (igt_timeout(timeout, "%s timed out\n", who))
			return -EINTR;

		pfn++;
	}
	if (pfn != pt->end) {
		pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
		       __func__, who, pt->end, pfn);
		return -EINVAL;
	}

	return 0;
}

static int expect_pfn_sgtable(struct pfn_table *pt,
			      npages_fn_t npages_fn,
			      struct rnd_state *rnd,
			      const char *who,
			      unsigned long timeout)
{
	int err;

	err = expect_pfn_sg(pt, npages_fn, rnd, who, timeout);
	if (err)
		return err;

	err = expect_pfn_sg_page_iter(pt, who, timeout);
	if (err)
		return err;

	err = expect_pfn_sgtiter(pt, who, timeout);
	if (err)
		return err;

	return 0;
}

static unsigned int one(unsigned long n,
			unsigned long count,
			struct rnd_state *rnd)
{
	return 1;
}

static unsigned int grow(unsigned long n,
			 unsigned long count,
			 struct rnd_state *rnd)
{
	return n + 1;
}

static unsigned int shrink(unsigned long n,
			   unsigned long count,
			   struct rnd_state *rnd)
{
	return count - n;
}

static unsigned int random(unsigned long n,
			   unsigned long count,
			   struct rnd_state *rnd)
{
	return 1 + (prandom_u32_state(rnd) % 1024);
}

static unsigned int random_page_size_pages(unsigned long n,
					   unsigned long count,
					   struct rnd_state *rnd)
{
	/* 4K, 64K, 2M */
	static unsigned int page_count[] = {
		BIT(12) >> PAGE_SHIFT,
		BIT(16) >> PAGE_SHIFT,
		BIT(21) >> PAGE_SHIFT,
	};

	return page_count[(prandom_u32_state(rnd) % 3)];
}

static inline bool page_contiguous(struct page *first,
				   struct page *last,
				   unsigned long npages)
{
	return first + npages == last;
}

static int alloc_table(struct pfn_table *pt,
		       unsigned long count, unsigned long max,
		       npages_fn_t npages_fn,
		       struct rnd_state *rnd,
		       int alloc_error)
{
	struct scatterlist *sg;
	unsigned long n, pfn;

	if (sg_alloc_table(&pt->st, max,
			   GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN))
		return alloc_error;

	/* count * PAGE_SIZE must fit in sg->length (u32); e.g. count < 2^20
	 * for 4KiB pages. */
	GEM_BUG_ON(overflows_type(count * PAGE_SIZE, sg->length));

	/* Construct a table where each scatterlist contains different number
	 * of entries. The idea is to check that we can iterate the individual
	 * pages from inside the coalesced lists.
	 */
	pt->start = PFN_BIAS;
	pfn = pt->start;
	sg = pt->st.sgl;
	for (n = 0; n < count; n++) {
		unsigned long npages = npages_fn(n, count, rnd);

		/* Nobody expects the Sparse Memmap! */
		if (!page_contiguous(pfn_to_page(pfn),
				     pfn_to_page(pfn + npages),
				     npages)) {
			sg_free_table(&pt->st);
			return -ENOSPC;
		}

		if (n)
			sg = sg_next(sg);
		sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0);

		GEM_BUG_ON(page_to_pfn(sg_page(sg)) != pfn);
		GEM_BUG_ON(sg->length != npages * PAGE_SIZE);
		GEM_BUG_ON(sg->offset != 0);

		pfn += npages;
	}
	sg_mark_end(sg);
	pt->st.nents = n;
	pt->end = pfn;

	return 0;
}

static const npages_fn_t npages_funcs[] = {
	one,
	grow,
	shrink,
	random,
	random_page_size_pages,
	NULL,
};

static int igt_sg_alloc(void *ignored)
{
	IGT_TIMEOUT(end_time);
	const unsigned long max_order = 20; /* approximating a 4GiB object */
	struct rnd_state prng;
	unsigned long prime;
	int alloc_error = -ENOMEM;

	for_each_prime_number(prime, max_order) {
		unsigned long size = BIT(prime);
		int offset;

		for (offset = -1; offset <= 1; offset++) {
			unsigned long sz = size + offset;
			const npages_fn_t *npages;
			struct pfn_table pt;
			int err;

			for (npages = npages_funcs; *npages; npages++) {
				prandom_seed_state(&prng,
						   i915_selftest.random_seed);
				err = alloc_table(&pt, sz, sz, *npages, &prng,
						  alloc_error);
				if (err == -ENOSPC)
					break;
				if (err)
					return err;

				prandom_seed_state(&prng,
						   i915_selftest.random_seed);
				err = expect_pfn_sgtable(&pt, *npages, &prng,
							 "sg_alloc_table",
							 end_time);
				sg_free_table(&pt.st);
				if (err)
					return err;
			}
		}

		/* Test at least one continuation before accepting oom */
		if (size > SG_MAX_SINGLE_ALLOC)
			alloc_error = -ENOSPC;
	}

	return 0;
}

static int igt_sg_trim(void *ignored)
{
	IGT_TIMEOUT(end_time);
	const unsigned long max = PAGE_SIZE; /* not prime! */
	struct pfn_table pt;
	unsigned long prime;
	int alloc_error = -ENOMEM;

	for_each_prime_number(prime, max) {
		const npages_fn_t *npages;
		int err;

		for (npages = npages_funcs; *npages; npages++) {
			struct rnd_state prng;

			prandom_seed_state(&prng, i915_selftest.random_seed);
			err = alloc_table(&pt, prime, max, *npages, &prng,
					  alloc_error);
			if (err == -ENOSPC)
				break;
			if (err)
				return err;

			if (i915_sg_trim(&pt.st)) {
				if (pt.st.orig_nents != prime ||
				    pt.st.nents != prime) {
					pr_err("i915_sg_trim failed (nents %u, orig_nents %u), expected %lu\n",
					       pt.st.nents, pt.st.orig_nents, prime);
					err = -EINVAL;
				} else {
					prandom_seed_state(&prng,
							   i915_selftest.random_seed);
					err = expect_pfn_sgtable(&pt,
								 *npages, &prng,
								 "i915_sg_trim",
								 end_time);
				}
			}
			sg_free_table(&pt.st);
			if (err)
				return err;
		}

		/* Test at least one continuation before accepting oom */
		if (prime > SG_MAX_SINGLE_ALLOC)
			alloc_error = -ENOSPC;
	}

	return 0;
}

int scatterlist_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_sg_alloc),
		SUBTEST(igt_sg_trim),
	};

	return i915_subtests(tests, NULL);
}