/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
227ec681f3Smrg */ 237ec681f3Smrg 247ec681f3Smrg#ifndef INTEL_CLFLUSH_H 257ec681f3Smrg#define INTEL_CLFLUSH_H 267ec681f3Smrg 277ec681f3Smrg#define CACHELINE_SIZE 64 287ec681f3Smrg#define CACHELINE_MASK 63 297ec681f3Smrg 307ec681f3Smrgstatic inline void 317ec681f3Smrgintel_clflush_range(void *start, size_t size) 327ec681f3Smrg{ 337ec681f3Smrg void *p = (void *) (((uintptr_t) start) & ~CACHELINE_MASK); 347ec681f3Smrg void *end = start + size; 357ec681f3Smrg 367ec681f3Smrg while (p < end) { 377ec681f3Smrg __builtin_ia32_clflush(p); 387ec681f3Smrg p += CACHELINE_SIZE; 397ec681f3Smrg } 407ec681f3Smrg} 417ec681f3Smrg 427ec681f3Smrgstatic inline void 437ec681f3Smrgintel_flush_range(void *start, size_t size) 447ec681f3Smrg{ 457ec681f3Smrg __builtin_ia32_mfence(); 467ec681f3Smrg intel_clflush_range(start, size); 477ec681f3Smrg} 487ec681f3Smrg 497ec681f3Smrgstatic inline void 507ec681f3Smrgintel_invalidate_range(void *start, size_t size) 517ec681f3Smrg{ 527ec681f3Smrg intel_clflush_range(start, size); 537ec681f3Smrg 547ec681f3Smrg /* Modern Atom CPUs (Baytrail+) have issues with clflush serialization, 557ec681f3Smrg * where mfence is not a sufficient synchronization barrier. We must 567ec681f3Smrg * double clflush the last cacheline. This guarantees it will be ordered 577ec681f3Smrg * after the preceding clflushes, and then the mfence guards against 587ec681f3Smrg * prefetches crossing the clflush boundary. 597ec681f3Smrg * 607ec681f3Smrg * See kernel commit 396f5d62d1a5fd99421855a08ffdef8edb43c76e 617ec681f3Smrg * ("drm: Restore double clflush on the last partial cacheline") 627ec681f3Smrg * and https://bugs.freedesktop.org/show_bug.cgi?id=92845. 637ec681f3Smrg */ 647ec681f3Smrg __builtin_ia32_clflush(start + size - 1); 657ec681f3Smrg __builtin_ia32_mfence(); 667ec681f3Smrg} 677ec681f3Smrg 687ec681f3Smrg#endif 69