compiler.h revision 904cdfd2
/*
 * Copyright 1990,91 by Thomas Roell, Dinkelscherben, Germany.
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that
 * copyright notice and this permission notice appear in supporting
 * documentation, and that the name of Thomas Roell not be used in
 * advertising or publicity pertaining to distribution of the software without
 * specific, written prior permission. Thomas Roell makes no representations
 * about the suitability of this software for any purpose. It is provided
 * "as is" without express or implied warranty.
 *
 * THOMAS ROELL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THOMAS ROELL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 *
 */
/*
 * Copyright (c) 1994-2003 by The XFree86 Project, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Except as contained in this notice, the name of the copyright holder(s)
 * and author(s) shall not be used in advertising or otherwise to promote
 * the sale, use or other dealings in this Software without prior written
 * authorization from the copyright holder(s) and author(s).
 */

#ifndef _COMPILER_H

# define _COMPILER_H
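/*
 * Summary (added for orientation): this header provides compiler- and
 * architecture-specific definitions used by drivers: port I/O (inb/outb and
 * friends), unaligned memory access helpers (ldq_u, stq_u and friends),
 * memory barriers, and the MMIO_IN8/16/32 and MMIO_OUT8/16/32 register
 * access macros.
 */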
#if defined(__SUNPRO_C)
# define DO_PROTOTYPES
#endif

/* Allow drivers to use the GCC-supported __inline__ and/or __inline. */
# ifndef __inline__
# if defined(__GNUC__)
    /* gcc has __inline__ */
# elif defined(__HIGHC__)
# define __inline__ _Inline
# else
# define __inline__ /**/
# endif
# endif /* __inline__ */
# ifndef __inline
# if defined(__GNUC__)
    /* gcc has __inline */
# elif defined(__HIGHC__)
# define __inline _Inline
# else
# define __inline /**/
# endif
# endif /* __inline */

# if defined(IODEBUG) && defined(__GNUC__)
# define outb RealOutb
# define outw RealOutw
# define outl RealOutl
# define inb RealInb
# define inw RealInw
# define inl RealInl
# endif

# if defined(QNX4)  /* Do this for now to keep Watcom happy */
# define outb outp
# define outw outpw
# define outl outpd
# define inb inp
# define inw inpw
# define inl inpd

/* Define the ffs function for inlining */
extern int ffs(unsigned long);
# pragma aux ffs_ = \
        "bsf edx, eax" \
        "jnz bits_set" \
        "xor eax, eax" \
        "jmp exit1" \
        "bits_set:" \
        "mov eax, edx" \
        "inc eax" \
        "exit1:" \
        __parm [eax] \
        __modify [eax edx] \
        __value [eax] \
        ;
# endif

# if defined(__SUNPRO_C)
# define DO_PROTOTYPES
# endif

# if defined(NO_INLINE) || defined(DO_PROTOTYPES)

# if !defined(__arm__)
# if !defined(__sparc__) && !defined(__sparc) && !defined(__arm32__) \
    && !(defined(__alpha__) && defined(linux)) \
    && !(defined(__ia64__) && defined(linux))

extern void outb(unsigned short, unsigned char);
extern void outw(unsigned short, unsigned short);
extern void outl(unsigned short, unsigned int);
extern unsigned int inb(unsigned short);
extern unsigned int inw(unsigned short);
extern unsigned int inl(unsigned short);

# else /* __sparc__, __arm32__, __alpha__ */

extern void outb(unsigned long, unsigned char);
extern void outw(unsigned long, unsigned short);
extern void outl(unsigned long, unsigned int);
extern unsigned int inb(unsigned long);
extern unsigned int inw(unsigned long);
extern unsigned int inl(unsigned long);

# endif /* __sparc__, __arm32__, __alpha__ */
# endif /* __arm__ */

extern unsigned long ldq_u(unsigned long *);
extern unsigned long ldl_u(unsigned int *);
extern unsigned long ldw_u(unsigned short *);
extern void stq_u(unsigned long, unsigned long *);
extern void stl_u(unsigned long, unsigned int *);
extern void stw_u(unsigned long, unsigned short *);
extern void mem_barrier(void);
extern void write_mem_barrier(void);
extern void stl_brx(unsigned long, volatile unsigned char *, int);
extern void stw_brx(unsigned short, volatile unsigned char *, int);
extern unsigned long ldl_brx(volatile unsigned char *, int);
extern unsigned short ldw_brx(volatile unsigned char *, int);

# endif

# ifndef NO_INLINE
# ifdef __GNUC__
# if (defined(linux) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && (defined(__alpha__))

# ifdef linux
/* for Linux on Alpha, we use the LIBC _inx/_outx routines */
/* note that the appropriate setup via "ioperm" needs to be done */
/*  *before* any inx/outx is done. */
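/* Note (added): the libc hooks declared below take (value, port); the inline
   wrappers adapt them to the (port, value) argument order used throughout
   this header. */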
extern void (*_alpha_outb)(char val, unsigned long port);
static __inline__ void
outb(unsigned long port, unsigned char val)
{
    _alpha_outb(val, port);
}

extern void (*_alpha_outw)(short val, unsigned long port);
static __inline__ void
outw(unsigned long port, unsigned short val)
{
    _alpha_outw(val, port);
}

extern void (*_alpha_outl)(int val, unsigned long port);
static __inline__ void
outl(unsigned long port, unsigned int val)
{
    _alpha_outl(val, port);
}

extern unsigned int (*_alpha_inb)(unsigned long port);
static __inline__ unsigned int
inb(unsigned long port)
{
    return _alpha_inb(port);
}

extern unsigned int (*_alpha_inw)(unsigned long port);
static __inline__ unsigned int
inw(unsigned long port)
{
    return _alpha_inw(port);
}

extern unsigned int (*_alpha_inl)(unsigned long port);
static __inline__ unsigned int
inl(unsigned long port)
{
    return _alpha_inl(port);
}

# endif /* linux */

# if (defined(__FreeBSD__) || defined(__OpenBSD__)) \
     && !defined(DO_PROTOTYPES)

/* for FreeBSD and OpenBSD on Alpha, we use the libio (resp. libalpha) */
/* inx/outx routines */
/* note that the appropriate setup via "ioperm" needs to be done */
/*  *before* any inx/outx is done. */

extern void outb(unsigned int port, unsigned char val);
extern void outw(unsigned int port, unsigned short val);
extern void outl(unsigned int port, unsigned int val);
extern unsigned char inb(unsigned int port);
extern unsigned short inw(unsigned int port);
extern unsigned int inl(unsigned int port);

# endif /* (__FreeBSD__ || __OpenBSD__ ) && !DO_PROTOTYPES */


#if defined(__NetBSD__)
#include <machine/pio.h>
#endif /* __NetBSD__ */

/*
 * inline functions to do unaligned accesses
 * from linux/include/asm-alpha/unaligned.h
 */

/*
 * EGCS 1.1 knows about arbitrary unaligned loads. Define some
 * packed structures to talk about such things with.
 */
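/* Illustrative use only (not part of this header): a driver reading a 32-bit
 * value through a possibly misaligned pointer p would use
 *     unsigned int v = ldl_u((unsigned int *) p);
 * and write it back with stl_u(v, (unsigned int *) p), instead of
 * dereferencing p directly.
 */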
struct __una_u64 { unsigned long x __attribute__((packed)); };
struct __una_u32 { unsigned int x __attribute__((packed)); };
struct __una_u16 { unsigned short x __attribute__((packed)); };

/*
 * Elemental unaligned loads
 */
/* let's try making these things static */

static __inline__ unsigned long ldq_u(unsigned long * r11)
{
# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
    const struct __una_u64 *ptr = (const struct __una_u64 *) r11;
    return ptr->x;
# else
    unsigned long r1,r2;
    __asm__("ldq_u %0,%3\n\t"
            "ldq_u %1,%4\n\t"
            "extql %0,%2,%0\n\t"
            "extqh %1,%2,%1"
            :"=&r" (r1), "=&r" (r2)
            :"r" (r11),
             "m" (*r11),
             "m" (*(const unsigned long *)(7+(char *) r11)));
    return r1 | r2;
# endif
}

static __inline__ unsigned long ldl_u(unsigned int * r11)
{
# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
    const struct __una_u32 *ptr = (const struct __una_u32 *) r11;
    return ptr->x;
# else
    unsigned long r1,r2;
    __asm__("ldq_u %0,%3\n\t"
            "ldq_u %1,%4\n\t"
            "extll %0,%2,%0\n\t"
            "extlh %1,%2,%1"
            :"=&r" (r1), "=&r" (r2)
            :"r" (r11),
             "m" (*r11),
             "m" (*(const unsigned long *)(3+(char *) r11)));
    return r1 | r2;
# endif
}

static __inline__ unsigned long ldw_u(unsigned short * r11)
{
# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
    const struct __una_u16 *ptr = (const struct __una_u16 *) r11;
    return ptr->x;
# else
    unsigned long r1,r2;
    __asm__("ldq_u %0,%3\n\t"
            "ldq_u %1,%4\n\t"
            "extwl %0,%2,%0\n\t"
            "extwh %1,%2,%1"
            :"=&r" (r1), "=&r" (r2)
            :"r" (r11),
             "m" (*r11),
             "m" (*(const unsigned long *)(1+(char *) r11)));
    return r1 | r2;
# endif
}

/*
 * Elemental unaligned stores
 */

static __inline__ void stq_u(unsigned long r5, unsigned long * r11)
{
# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
    struct __una_u64 *ptr = (struct __una_u64 *) r11;
    ptr->x = r5;
# else
    unsigned long r1,r2,r3,r4;

    __asm__("ldq_u %3,%1\n\t"
            "ldq_u %2,%0\n\t"
            "insqh %6,%7,%5\n\t"
            "insql %6,%7,%4\n\t"
            "mskqh %3,%7,%3\n\t"
            "mskql %2,%7,%2\n\t"
            "bis %3,%5,%3\n\t"
            "bis %2,%4,%2\n\t"
            "stq_u %3,%1\n\t"
            "stq_u %2,%0"
            :"=m" (*r11),
             "=m" (*(unsigned long *)(7+(char *) r11)),
             "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
            :"r" (r5), "r" (r11));
# endif
}

static __inline__ void stl_u(unsigned long r5, unsigned int * r11)
{
# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
    struct __una_u32 *ptr = (struct __una_u32 *) r11;
    ptr->x = r5;
# else
    unsigned long r1,r2,r3,r4;

    __asm__("ldq_u %3,%1\n\t"
            "ldq_u %2,%0\n\t"
            "inslh %6,%7,%5\n\t"
            "insll %6,%7,%4\n\t"
            "msklh %3,%7,%3\n\t"
            "mskll %2,%7,%2\n\t"
            "bis %3,%5,%3\n\t"
            "bis %2,%4,%2\n\t"
            "stq_u %3,%1\n\t"
            "stq_u %2,%0"
            :"=m" (*r11),
             "=m" (*(unsigned long *)(3+(char *) r11)),
             "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
            :"r" (r5), "r" (r11));
# endif
}

static __inline__ void stw_u(unsigned long r5, unsigned short * r11)
{
# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
    struct __una_u16 *ptr = (struct __una_u16 *) r11;
    ptr->x = r5;
# else
    unsigned long r1,r2,r3,r4;

    __asm__("ldq_u %3,%1\n\t"
            "ldq_u %2,%0\n\t"
            "inswh %6,%7,%5\n\t"
            "inswl %6,%7,%4\n\t"
            "mskwh %3,%7,%3\n\t"
            "mskwl %2,%7,%2\n\t"
            "bis %3,%5,%3\n\t"
            "bis %2,%4,%2\n\t"
            "stq_u %3,%1\n\t"
            "stq_u %2,%0"
            :"=m" (*r11),
             "=m" (*(unsigned long *)(1+(char *) r11)),
             "=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
            :"r" (r5), "r" (r11));
# endif
}

/* to flush the I-cache before jumping to code which just got loaded */
# define PAL_imb 134
# define istream_mem_barrier() \
    __asm__ __volatile__("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
# define mem_barrier() __asm__ __volatile__("mb" : : : "memory")
# ifdef __ELF__
# define write_mem_barrier() __asm__ __volatile__("wmb" : : : "memory")
# else /* ECOFF gas 2.6 doesn't know "wmb" :-( */
# define write_mem_barrier() mem_barrier()
# endif


# elif defined(linux) && defined(__ia64__)

# include <inttypes.h>

# include <sys/io.h>

struct __una_u64 { uint64_t x __attribute__((packed)); };
struct __una_u32 { uint32_t x __attribute__((packed)); };
struct __una_u16 { uint16_t x __attribute__((packed)); };

static __inline__ unsigned long
__uldq (const unsigned long * r11)
{
    const struct __una_u64 *ptr = (const struct __una_u64 *) r11;
    return ptr->x;
}

static __inline__ unsigned long
__uldl (const unsigned int * r11)
{
    const struct __una_u32 *ptr = (const struct __una_u32 *) r11;
    return ptr->x;
}

static __inline__ unsigned long
__uldw (const unsigned short * r11)
{
    const struct __una_u16 *ptr = (const struct __una_u16 *) r11;
    return ptr->x;
}

static __inline__ void
__ustq (unsigned long r5, unsigned long * r11)
{
    struct __una_u64 *ptr = (struct __una_u64 *) r11;
    ptr->x = r5;
}

static __inline__ void
__ustl (unsigned long r5, unsigned int * r11)
{
    struct __una_u32 *ptr = (struct __una_u32 *) r11;
    ptr->x = r5;
}

static __inline__ void
__ustw (unsigned long r5, unsigned short * r11)
{
    struct __una_u16 *ptr = (struct __una_u16 *) r11;
    ptr->x = r5;
}

# define ldq_u(p) __uldq(p)
# define ldl_u(p) __uldl(p)
# define ldw_u(p) __uldw(p)
# define stq_u(v,p) __ustq(v,p)
# define stl_u(v,p) __ustl(v,p)
# define stw_u(v,p) __ustw(v,p)

# ifndef __INTEL_COMPILER
# define mem_barrier() __asm__ __volatile__ ("mf" ::: "memory")
# define write_mem_barrier() __asm__ __volatile__ ("mf" ::: "memory")
# else
# include "ia64intrin.h"
# define mem_barrier() __mf()
# define write_mem_barrier() __mf()
# endif

/*
 * This is overkill, but for different reasons depending on where it is used.
 * This is thus general enough to be used everywhere cache flushes are needed.
 * It doesn't handle memory access serialisation by other processors, though.
 */
# ifndef __INTEL_COMPILER
# define ia64_flush_cache(Addr) \
    __asm__ __volatile__ ( \
        "fc.i %0;;;" \
        "sync.i;;;" \
        "mf;;;" \
        "srlz.i;;;" \
        :: "r"(Addr) : "memory")
# else
# define ia64_flush_cache(Addr) { \
    __fc(Addr); \
    __synci(); \
    __mf(); \
    __isrlz(); \
    }
# endif
# undef outb
# undef outw
# undef outl
# undef inb
# undef inw
# undef inl
extern void outb(unsigned long port, unsigned char val);
extern void outw(unsigned long port, unsigned short val);
extern void outl(unsigned long port, unsigned int val);
extern unsigned int inb(unsigned long port);
extern unsigned int inw(unsigned long port);
extern unsigned int inl(unsigned long port);

# elif defined(linux) && defined(__amd64__)

# include <inttypes.h>

# define ldq_u(p) (*((unsigned long *)(p)))
# define ldl_u(p) (*((unsigned int *)(p)))
# define ldw_u(p) (*((unsigned short *)(p)))
# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
# define stw_u(v,p) (*(unsigned short *)(p)) = (v)

# define mem_barrier() \
    __asm__ __volatile__ ("lock; addl $0,0(%%rsp)": : :"memory")
# define write_mem_barrier() \
    __asm__ __volatile__ ("": : :"memory")


static __inline__ void
outb(unsigned short port, unsigned char val)
{
    __asm__ __volatile__("outb %0,%1" : :"a" (val), "d" (port));
}


static __inline__ void
outw(unsigned short port, unsigned short val)
{
    __asm__ __volatile__("outw %0,%1" : :"a" (val), "d" (port));
}

static __inline__ void
outl(unsigned short port, unsigned int val)
{
    __asm__ __volatile__("outl %0,%1" : :"a" (val), "d" (port));
}

static __inline__ unsigned int
inb(unsigned short port)
{
    unsigned char ret;
    __asm__ __volatile__("inb %1,%0" :
                         "=a" (ret) :
                         "d" (port));
    return ret;
}

static __inline__ unsigned int
inw(unsigned short port)
{
    unsigned short ret;
    __asm__ __volatile__("inw %1,%0" :
                         "=a" (ret) :
                         "d" (port));
    return ret;
}

static __inline__ unsigned int
inl(unsigned short port)
{
    unsigned int ret;
    __asm__ __volatile__("inl %1,%0" :
                         "=a" (ret) :
                         "d" (port));
    return ret;
}

# elif (defined(linux) || defined(Lynx) || defined(sun) || \
        defined(__OpenBSD__) || defined(__NetBSD__) || \
        defined(__FreeBSD__)) && defined(__sparc__)

# if !defined(Lynx)
# ifndef ASI_PL
# define ASI_PL 0x88
# endif

# define barrier() __asm__ __volatile__(".word 0x8143e00a": : :"memory")

static __inline__ void
outb(unsigned long port, unsigned char val)
{
    __asm__ __volatile__("stba %0, [%1] %2"
                         : /* No outputs */
                         : "r" (val), "r" (port), "i" (ASI_PL));
    barrier();
}

static __inline__ void
outw(unsigned long port, unsigned short val)
{
    __asm__ __volatile__("stha %0, [%1] %2"
                         : /* No outputs */
                         : "r" (val), "r" (port), "i" (ASI_PL));
    barrier();
}

static __inline__ void
outl(unsigned long port, unsigned int val)
{
    __asm__ __volatile__("sta %0, [%1] %2"
                         : /* No outputs */
                         : "r" (val), "r" (port), "i" (ASI_PL));
    barrier();
}

static __inline__ unsigned int
inb(unsigned long port)
{
    unsigned int ret;
    __asm__ __volatile__("lduba [%1] %2, %0"
                         : "=r" (ret)
                         : "r" (port), "i" (ASI_PL));
    return ret;
}

static __inline__ unsigned int
inw(unsigned long port)
{
    unsigned int ret;
    __asm__ __volatile__("lduha [%1] %2, %0"
                         : "=r" (ret)
                         : "r" (port), "i" (ASI_PL));
    return ret;
}

static __inline__ unsigned int
inl(unsigned long port)
{
    unsigned int ret;
    __asm__ __volatile__("lda [%1] %2, %0"
                         : "=r" (ret)
                         : "r" (port), "i" (ASI_PL));
    return ret;
}

static __inline__ unsigned char
xf86ReadMmio8(__volatile__ void *base, const unsigned long offset)
{
    unsigned long addr = ((unsigned long)base) + offset;
    unsigned char ret;

    __asm__ __volatile__("lduba [%1] %2, %0"
                         : "=r" (ret)
                         : "r" (addr), "i" (ASI_PL));
    return ret;
}

static __inline__ unsigned short
xf86ReadMmio16Be(__volatile__ void *base, const unsigned long offset)
{
    unsigned long addr = ((unsigned long)base) + offset;
    unsigned short ret;

    __asm__ __volatile__("lduh [%1], %0"
                         : "=r" (ret)
                         : "r" (addr));
    return ret;
}

static __inline__ unsigned short
xf86ReadMmio16Le(__volatile__ void *base, const unsigned long offset)
{
    unsigned long addr = ((unsigned long)base) + offset;
    unsigned short ret;

    __asm__ __volatile__("lduha [%1] %2, %0"
                         : "=r" (ret)
                         : "r" (addr), "i" (ASI_PL));
    return ret;
}

static __inline__ unsigned int
xf86ReadMmio32Be(__volatile__ void *base, const unsigned long offset)
{
    unsigned long addr = ((unsigned long)base) + offset;
    unsigned int ret;

    __asm__ __volatile__("ld [%1], %0"
                         : "=r" (ret)
                         : "r" (addr));
    return ret;
}

static __inline__ unsigned int
xf86ReadMmio32Le(__volatile__ void *base, const unsigned long offset)
{
    unsigned long addr = ((unsigned long)base) + offset;
    unsigned int ret;

    __asm__ __volatile__("lda [%1] %2, %0"
                         : "=r" (ret)
                         : "r" (addr), "i" (ASI_PL));
    return ret;
}

static __inline__ void
xf86WriteMmio8(__volatile__ void *base, const unsigned long offset,
               const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("stba %0, [%1] %2"
                         : /* No outputs */
                         : "r" (val), "r" (addr), "i" (ASI_PL));
    barrier();
}

static __inline__ void
xf86WriteMmio16Be(__volatile__ void *base, const unsigned long offset,
                  const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("sth %0, [%1]"
                         : /* No outputs */
                         : "r" (val), "r" (addr));
    barrier();
}

static __inline__ void
xf86WriteMmio16Le(__volatile__ void *base, const unsigned long offset,
                  const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("stha %0, [%1] %2"
                         : /* No outputs */
                         : "r" (val), "r" (addr), "i" (ASI_PL));
    barrier();
}

static __inline__ void
xf86WriteMmio32Be(__volatile__ void *base, const unsigned long offset,
                  const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("st %0, [%1]"
                         : /* No outputs */
                         : "r" (val), "r" (addr));
    barrier();
}

static __inline__ void
xf86WriteMmio32Le(__volatile__ void *base, const unsigned long offset,
                  const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("sta %0, [%1] %2"
                         : /* No outputs */
                         : "r" (val), "r" (addr), "i" (ASI_PL));
    barrier();
}

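/* Note (added): the *NB ("no barrier") variants below are identical to the
   writers above except that they omit the trailing barrier(); callers must
   order the accesses themselves. */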
static __inline__ void
xf86WriteMmio8NB(__volatile__ void *base, const unsigned long offset,
                 const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("stba %0, [%1] %2"
                         : /* No outputs */
                         : "r" (val), "r" (addr), "i" (ASI_PL));
}

static __inline__ void
xf86WriteMmio16BeNB(__volatile__ void *base, const unsigned long offset,
                    const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("sth %0, [%1]"
                         : /* No outputs */
                         : "r" (val), "r" (addr));
}

static __inline__ void
xf86WriteMmio16LeNB(__volatile__ void *base, const unsigned long offset,
                    const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("stha %0, [%1] %2"
                         : /* No outputs */
                         : "r" (val), "r" (addr), "i" (ASI_PL));
}

static __inline__ void
xf86WriteMmio32BeNB(__volatile__ void *base, const unsigned long offset,
                    const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("st %0, [%1]"
                         : /* No outputs */
                         : "r" (val), "r" (addr));
}

static __inline__ void
xf86WriteMmio32LeNB(__volatile__ void *base, const unsigned long offset,
                    const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("sta %0, [%1] %2"
                         : /* No outputs */
                         : "r" (val), "r" (addr), "i" (ASI_PL));
}

# endif /* !Lynx */

/*
 * EGCS 1.1 knows about arbitrary unaligned loads. Define some
 * packed structures to talk about such things with.
 */

# if defined(__arch64__) || defined(__sparcv9)
struct __una_u64 { unsigned long x __attribute__((packed)); };
# endif
struct __una_u32 { unsigned int x __attribute__((packed)); };
struct __una_u16 { unsigned short x __attribute__((packed)); };

static __inline__ unsigned long ldq_u(unsigned long *p)
{
# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
# if defined(__arch64__) || defined(__sparcv9)
    const struct __una_u64 *ptr = (const struct __una_u64 *) p;
# else
    const struct __una_u32 *ptr = (const struct __una_u32 *) p;
# endif
    return ptr->x;
# else
    unsigned long ret;
    memmove(&ret, p, sizeof(*p));
    return ret;
# endif
}

static __inline__ unsigned long ldl_u(unsigned int *p)
{
# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
    const struct __una_u32 *ptr = (const struct __una_u32 *) p;
    return ptr->x;
# else
    unsigned int ret;
    memmove(&ret, p, sizeof(*p));
    return ret;
# endif
}

static __inline__ unsigned long ldw_u(unsigned short *p)
{
# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
    const struct __una_u16 *ptr = (const struct __una_u16 *) p;
    return ptr->x;
# else
    unsigned short ret;
    memmove(&ret, p, sizeof(*p));
    return ret;
# endif
}

static __inline__ void stq_u(unsigned long val, unsigned long *p)
{
# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
# if defined(__arch64__) || defined(__sparcv9)
    struct __una_u64 *ptr = (struct __una_u64 *) p;
# else
    struct __una_u32 *ptr = (struct __una_u32 *) p;
# endif
    ptr->x = val;
# else
    unsigned long tmp = val;
    memmove(p, &tmp, sizeof(*p));
# endif
}

static __inline__ void stl_u(unsigned long val, unsigned int *p)
{
# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
    struct __una_u32 *ptr = (struct __una_u32 *) p;
    ptr->x = val;
# else
    unsigned int tmp = val;
    memmove(p, &tmp, sizeof(*p));
# endif
}

static __inline__ void stw_u(unsigned long val, unsigned short *p)
{
# if defined(__GNUC__) && ((__GNUC__ > 2) || (__GNUC_MINOR__ >= 91))
    struct __una_u16 *ptr = (struct __una_u16 *) p;
    ptr->x = val;
# else
    unsigned short tmp = val;
    memmove(p, &tmp, sizeof(*p));
# endif
}

# define mem_barrier() /* XXX: nop for now */
# define write_mem_barrier() /* XXX: nop for now */

# elif defined(__mips__) || ((defined(__arm32__) || defined(__arm__)) && !defined(__linux__))
# if defined(__arm32__) || defined(__arm__)
# define PORT_SIZE long
# else
# define PORT_SIZE short
# endif

unsigned int IOPortBase;  /* Memory mapped I/O port area */

static __inline__ void
outb(unsigned PORT_SIZE port, unsigned char val)
{
    *(volatile unsigned char*)(((unsigned PORT_SIZE)(port))+IOPortBase) = val;
}

static __inline__ void
outw(unsigned PORT_SIZE port, unsigned short val)
{
    *(volatile unsigned short*)(((unsigned PORT_SIZE)(port))+IOPortBase) = val;
}

static __inline__ void
outl(unsigned PORT_SIZE port, unsigned int val)
{
    *(volatile unsigned int*)(((unsigned PORT_SIZE)(port))+IOPortBase) = val;
}

static __inline__ unsigned int
inb(unsigned PORT_SIZE port)
{
    return *(volatile unsigned char*)(((unsigned PORT_SIZE)(port))+IOPortBase);
}

static __inline__ unsigned int
inw(unsigned PORT_SIZE port)
{
    return *(volatile unsigned short*)(((unsigned PORT_SIZE)(port))+IOPortBase);
}

static __inline__ unsigned int
inl(unsigned PORT_SIZE port)
{
    return *(volatile unsigned int*)(((unsigned PORT_SIZE)(port))+IOPortBase);
}


# if defined(__mips__)
static __inline__ unsigned long ldq_u(unsigned long * r11)
{
    unsigned long r1;
    __asm__("lwr %0,%2\n\t"
            "lwl %0,%3\n\t"
            :"=&r" (r1)
            :"r" (r11),
             "m" (*r11),
             "m" (*(unsigned long *)(3+(char *) r11)));
    return r1;
}

static __inline__ unsigned long ldl_u(unsigned int * r11)
{
    unsigned long r1;
    __asm__("lwr %0,%2\n\t"
            "lwl %0,%3\n\t"
            :"=&r" (r1)
            :"r" (r11),
             "m" (*r11),
             "m" (*(unsigned long *)(3+(char *) r11)));
    return r1;
}

static __inline__ unsigned long ldw_u(unsigned short * r11)
{
    unsigned long r1;
    __asm__("lwr %0,%2\n\t"
            "lwl %0,%3\n\t"
            :"=&r" (r1)
            :"r" (r11),
             "m" (*r11),
             "m" (*(unsigned long *)(1+(char *) r11)));
    return r1;
}

# ifdef linux  /* don't mess with other OSs */

/*
 * EGCS 1.1 knows about arbitrary unaligned loads (and we don't support older
 * versions anyway). Define some packed structures to talk about such things
 * with.
 */

struct __una_u32 { unsigned int x __attribute__((packed)); };
struct __una_u16 { unsigned short x __attribute__((packed)); };

static __inline__ void stw_u(unsigned long val, unsigned short *p)
{
    struct __una_u16 *ptr = (struct __una_u16 *) p;
    ptr->x = val;
}

static __inline__ void stl_u(unsigned long val, unsigned int *p)
{
    struct __una_u32 *ptr = (struct __una_u32 *) p;
    ptr->x = val;
}

# if X_BYTE_ORDER == X_BIG_ENDIAN
static __inline__ unsigned int
xf86ReadMmio32Be(__volatile__ void *base, const unsigned long offset)
{
    unsigned long addr = ((unsigned long)base) + offset;
    unsigned int ret;

    __asm__ __volatile__("lw %0, 0(%1)"
                         : "=r" (ret)
                         : "r" (addr));
    return ret;
}

static __inline__ void
xf86WriteMmio32Be(__volatile__ void *base, const unsigned long offset,
                  const unsigned int val)
{
    unsigned long addr = ((unsigned long)base) + offset;

    __asm__ __volatile__("sw %0, 0(%1)"
                         : /* No outputs */
                         : "r" (val), "r" (addr));
}
# endif

# define mem_barrier() \
    __asm__ __volatile__( \
        "# prevent instructions being moved around\n\t" \
        ".set\tnoreorder\n\t" \
        "# 8 nops to fool the R4400 pipeline\n\t" \
        "nop;nop;nop;nop;nop;nop;nop;nop\n\t" \
        ".set\treorder" \
        : /* no output */ \
        : /* no input */ \
        : "memory")
# define write_mem_barrier() mem_barrier()

# else /* !linux */

# define stq_u(v,p) stl_u(v,p)
# define stl_u(v,p) (*(unsigned char *)(p)) = (v); \
                    (*((unsigned char *)(p)+1)) = ((v) >> 8); \
                    (*((unsigned char *)(p)+2)) = ((v) >> 16); \
                    (*((unsigned char *)(p)+3)) = ((v) >> 24)

# define stw_u(v,p) (*(unsigned char *)(p)) = (v); \
                    (*((unsigned char *)(p)+1)) = ((v) >> 8)

# define mem_barrier()  /* NOP */
# endif /* !linux */
# endif /* __mips__ */

# if defined(__arm32__) || defined(__arm__)
# define ldq_u(p) (*((unsigned long *)(p)))
# define ldl_u(p) (*((unsigned int *)(p)))
# define ldw_u(p) (*((unsigned short *)(p)))
# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
# define mem_barrier()  /* NOP */
# define write_mem_barrier()  /* NOP */
# endif /* __arm32__ || __arm__ */

# elif (defined(Lynx) || defined(linux) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__FreeBSD__)) && defined(__powerpc__)

# ifndef MAP_FAILED
# define MAP_FAILED ((void *)-1)
# endif

extern volatile unsigned char *ioBase;

#if defined(linux) && defined(__powerpc64__)
# include <linux/version.h>
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
# include <asm/memory.h>
# endif
#endif /* defined(linux) && defined(__powerpc64__) */
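/* Note (added): eieio ("Enforce In-order Execution of I/O") is the PowerPC
   store-ordering instruction; it is issued after every ordered MMIO write
   below and also serves as mem_barrier()/write_mem_barrier() on this
   platform. */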
#ifndef eieio /* We deal with arch-specific eieio() routines above... */
# define eieio() __asm__ __volatile__ ("eieio" ::: "memory")
#endif /* eieio */

static __inline__ unsigned char
xf86ReadMmio8(__volatile__ void *base, const unsigned long offset)
{
    register unsigned char val;
    __asm__ __volatile__(
        "lbzx %0,%1,%2\n\t"
        "eieio"
        : "=r" (val)
        : "b" (base), "r" (offset),
          "m" (*((volatile unsigned char *)base+offset)));
    return val;
}

static __inline__ unsigned short
xf86ReadMmio16Be(__volatile__ void *base, const unsigned long offset)
{
    register unsigned short val;
    __asm__ __volatile__(
        "lhzx %0,%1,%2\n\t"
        "eieio"
        : "=r" (val)
        : "b" (base), "r" (offset),
          "m" (*((volatile unsigned char *)base+offset)));
    return val;
}

static __inline__ unsigned short
xf86ReadMmio16Le(__volatile__ void *base, const unsigned long offset)
{
    register unsigned short val;
    __asm__ __volatile__(
        "lhbrx %0,%1,%2\n\t"
        "eieio"
        : "=r" (val)
        : "b" (base), "r" (offset),
          "m" (*((volatile unsigned char *)base+offset)));
    return val;
}

static __inline__ unsigned int
xf86ReadMmio32Be(__volatile__ void *base, const unsigned long offset)
{
    register unsigned int val;
    __asm__ __volatile__(
        "lwzx %0,%1,%2\n\t"
        "eieio"
        : "=r" (val)
        : "b" (base), "r" (offset),
          "m" (*((volatile unsigned char *)base+offset)));
    return val;
}

static __inline__ unsigned int
xf86ReadMmio32Le(__volatile__ void *base, const unsigned long offset)
{
    register unsigned int val;
    __asm__ __volatile__(
        "lwbrx %0,%1,%2\n\t"
        "eieio"
        : "=r" (val)
        : "b" (base), "r" (offset),
          "m" (*((volatile unsigned char *)base+offset)));
    return val;
}

static __inline__ void
xf86WriteMmioNB8(__volatile__ void *base, const unsigned long offset,
                 const unsigned char val)
{
    __asm__ __volatile__(
        "stbx %1,%2,%3\n\t"
        : "=m" (*((volatile unsigned char *)base+offset))
        : "r" (val), "b" (base), "r" (offset));
}

static __inline__ void
xf86WriteMmioNB16Le(__volatile__ void *base, const unsigned long offset,
                    const unsigned short val)
{
    __asm__ __volatile__(
        "sthbrx %1,%2,%3\n\t"
        : "=m" (*((volatile unsigned char *)base+offset))
        : "r" (val), "b" (base), "r" (offset));
}

static __inline__ void
xf86WriteMmioNB16Be(__volatile__ void *base, const unsigned long offset,
                    const unsigned short val)
{
    __asm__ __volatile__(
        "sthx %1,%2,%3\n\t"
        : "=m" (*((volatile unsigned char *)base+offset))
        : "r" (val), "b" (base), "r" (offset));
}

static __inline__ void
xf86WriteMmioNB32Le(__volatile__ void *base, const unsigned long offset,
                    const unsigned int val)
{
    __asm__ __volatile__(
        "stwbrx %1,%2,%3\n\t"
        : "=m" (*((volatile unsigned char *)base+offset))
        : "r" (val), "b" (base), "r" (offset));
}

static __inline__ void
xf86WriteMmioNB32Be(__volatile__ void *base, const unsigned long offset,
                    const unsigned int val)
{
    __asm__ __volatile__(
        "stwx %1,%2,%3\n\t"
        : "=m" (*((volatile unsigned char *)base+offset))
        : "r" (val), "b" (base), "r" (offset));
}

static __inline__ void
xf86WriteMmio8(__volatile__ void *base, const unsigned long offset,
               const unsigned char val)
{
    xf86WriteMmioNB8(base, offset, val);
    eieio();
}

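/* Note (added): the remaining ordered writers follow the same pattern -- the
   corresponding non-barrier store followed by eieio(). */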
static __inline__ void
xf86WriteMmio16Le(__volatile__ void *base, const unsigned long offset,
                  const unsigned short val)
{
    xf86WriteMmioNB16Le(base, offset, val);
    eieio();
}

static __inline__ void
xf86WriteMmio16Be(__volatile__ void *base, const unsigned long offset,
                  const unsigned short val)
{
    xf86WriteMmioNB16Be(base, offset, val);
    eieio();
}

static __inline__ void
xf86WriteMmio32Le(__volatile__ void *base, const unsigned long offset,
                  const unsigned int val)
{
    xf86WriteMmioNB32Le(base, offset, val);
    eieio();
}

static __inline__ void
xf86WriteMmio32Be(__volatile__ void *base, const unsigned long offset,
                  const unsigned int val)
{
    xf86WriteMmioNB32Be(base, offset, val);
    eieio();
}


static __inline__ void
outb(unsigned short port, unsigned char value)
{
    if(ioBase == MAP_FAILED) return;
    xf86WriteMmio8((void *)ioBase, port, value);
}

static __inline__ void
outw(unsigned short port, unsigned short value)
{
    if(ioBase == MAP_FAILED) return;
    xf86WriteMmio16Le((void *)ioBase, port, value);
}

static __inline__ void
outl(unsigned short port, unsigned int value)
{
    if(ioBase == MAP_FAILED) return;
    xf86WriteMmio32Le((void *)ioBase, port, value);
}

static __inline__ unsigned int
inb(unsigned short port)
{
    if(ioBase == MAP_FAILED) return 0;
    return xf86ReadMmio8((void *)ioBase, port);
}

static __inline__ unsigned int
inw(unsigned short port)
{
    if(ioBase == MAP_FAILED) return 0;
    return xf86ReadMmio16Le((void *)ioBase, port);
}

static __inline__ unsigned int
inl(unsigned short port)
{
    if(ioBase == MAP_FAILED) return 0;
    return xf86ReadMmio32Le((void *)ioBase, port);
}

# define ldq_u(p) ldl_u(p)
# define ldl_u(p) ((*(unsigned char *)(p)) | \
                   (*((unsigned char *)(p)+1)<<8) | \
                   (*((unsigned char *)(p)+2)<<16) | \
                   (*((unsigned char *)(p)+3)<<24))
# define ldw_u(p) ((*(unsigned char *)(p)) | \
                   (*((unsigned char *)(p)+1)<<8))

# define stq_u(v,p) stl_u(v,p)
# define stl_u(v,p) (*(unsigned char *)(p)) = (v); \
                    (*((unsigned char *)(p)+1)) = ((v) >> 8); \
                    (*((unsigned char *)(p)+2)) = ((v) >> 16); \
                    (*((unsigned char *)(p)+3)) = ((v) >> 24)
# define stw_u(v,p) (*(unsigned char *)(p)) = (v); \
                    (*((unsigned char *)(p)+1)) = ((v) >> 8)

# define mem_barrier() eieio()
# define write_mem_barrier() eieio()

#elif defined(__arm__) && defined(__linux__)

#define ldq_u(p) (*((unsigned long *)(p)))
#define ldl_u(p) (*((unsigned int *)(p)))
#define ldw_u(p) (*((unsigned short *)(p)))
#define stq_u(v,p) (*(unsigned long *)(p)) = (v)
#define stl_u(v,p) (*(unsigned int *)(p)) = (v)
#define stw_u(v,p) (*(unsigned short *)(p)) = (v)
#define mem_barrier()  /* NOP */
#define write_mem_barrier()  /* NOP */

/* for Linux on ARM, we use the LIBC inx/outx routines */
/* note that the appropriate setup via "ioperm" needs to be done */
/*  *before* any inx/outx is done. */
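/* Note (added): glibc's outb/outw/outl take (value, port); the xf_out*
   wrappers below adapt them to the (port, value) order used by the rest of
   this header. */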

#include <sys/io.h>

static __inline__ void
xf_outb(unsigned short port, unsigned char val)
{
    outb(val, port);
}

static __inline__ void
xf_outw(unsigned short port, unsigned short val)
{
    outw(val, port);
}

static __inline__ void
xf_outl(unsigned short port, unsigned int val)
{
    outl(val, port);
}

#define outb xf_outb
#define outw xf_outw
#define outl xf_outl

#define arm_flush_cache(addr) \
do { \
    register unsigned long _beg __asm ("a1") = (unsigned long) (addr); \
    register unsigned long _end __asm ("a2") = (unsigned long) (addr) + 4; \
    register unsigned long _flg __asm ("a3") = 0; \
    __asm __volatile ("swi 0x9f0002 @ sys_cacheflush" \
                      : "=r" (_beg) \
                      : "0" (_beg), "r" (_end), "r" (_flg)); \
} while (0)

# else /* ix86 */

# define ldq_u(p) (*((unsigned long *)(p)))
# define ldl_u(p) (*((unsigned int *)(p)))
# define ldw_u(p) (*((unsigned short *)(p)))
# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
# define mem_barrier()  /* NOP */
# define write_mem_barrier()  /* NOP */

# if !defined(__SUNPRO_C)
# if !defined(FAKEIT) && !defined(__mc68000__) && !defined(__arm__) && !defined(__sh__) && !defined(__hppa__) && !defined(__s390__)
# ifdef GCCUSESGAS

/*
 * If gcc uses gas rather than the native assembler, the syntax of these
 * inlines has to be different. DHD
 */

static __inline__ void
outb(unsigned short port, unsigned char val)
{
    __asm__ __volatile__("outb %0,%1" : :"a" (val), "d" (port));
}


static __inline__ void
outw(unsigned short port, unsigned short val)
{
    __asm__ __volatile__("outw %0,%1" : :"a" (val), "d" (port));
}

static __inline__ void
outl(unsigned short port, unsigned int val)
{
    __asm__ __volatile__("outl %0,%1" : :"a" (val), "d" (port));
}

static __inline__ unsigned int
inb(unsigned short port)
{
    unsigned char ret;
    __asm__ __volatile__("inb %1,%0" :
                         "=a" (ret) :
                         "d" (port));
    return ret;
}

static __inline__ unsigned int
inw(unsigned short port)
{
    unsigned short ret;
    __asm__ __volatile__("inw %1,%0" :
                         "=a" (ret) :
                         "d" (port));
    return ret;
}

static __inline__ unsigned int
inl(unsigned short port)
{
    unsigned int ret;
    __asm__ __volatile__("inl %1,%0" :
                         "=a" (ret) :
                         "d" (port));
    return ret;
}

# else /* GCCUSESGAS */

static __inline__ void
outb(unsigned short port, unsigned char val)
{
    __asm__ __volatile__("out%B0 (%1)" : :"a" (val), "d" (port));
}

static __inline__ void
outw(unsigned short port, unsigned short val)
{
    __asm__ __volatile__("out%W0 (%1)" : :"a" (val), "d" (port));
}

static __inline__ void
outl(unsigned short port, unsigned int val)
{
    __asm__ __volatile__("out%L0 (%1)" : :"a" (val), "d" (port));
}

static __inline__ unsigned int
inb(unsigned short port)
{
    unsigned char ret;
    __asm__ __volatile__("in%B0 (%1)" :
                         "=a" (ret) :
                         "d" (port));
    return ret;
}

static __inline__ unsigned int
inw(unsigned short port)
{
    unsigned short ret;
    __asm__ __volatile__("in%W0 (%1)" :
                         "=a" (ret) :
                         "d" (port));
    return ret;
}

static __inline__ unsigned int
inl(unsigned short port)
{
    unsigned int ret;
    __asm__ __volatile__("in%L0 (%1)" :
                         "=a" (ret) :
                         "d" (port));
    return ret;
}

# endif /* GCCUSESGAS */

# else /* !defined(FAKEIT) && !defined(__mc68000__) && !defined(__arm__) && !defined(__sh__) && !defined(__hppa__) */

static __inline__ void
outb(unsigned short port, unsigned char val)
{
}

static __inline__ void
outw(unsigned short port, unsigned short val)
{
}

static __inline__ void
outl(unsigned short port, unsigned int val)
{
}

static __inline__ unsigned int
inb(unsigned short port)
{
    return 0;
}

static __inline__ unsigned int
inw(unsigned short port)
{
    return 0;
}

static __inline__ unsigned int
inl(unsigned short port)
{
    return 0;
}

# endif /* FAKEIT */
# endif /* __SUNPRO_C */

# endif /* ix86 */

# else /* !GNUC */
# if !defined(QNX4)
# if defined(__STDC__) && (__STDC__ == 1)
# ifndef asm
# define asm __asm
# endif
# endif
# ifndef SCO325
# if defined(__UNIXWARE__)
# /* avoid including <sys/types.h> for <sys/inline.h> on UnixWare */
# define ushort unsigned short
# define ushort_t unsigned short
# define ulong unsigned long
# define ulong_t unsigned long
# define uint_t unsigned int
# define uchar_t unsigned char
# endif /* __UNIXWARE__ */
# if !defined(sgi) && !defined(__SUNPRO_C)
# include <sys/inline.h>
# endif
# else
# include "scoasm.h"
# endif
# if (!defined(__HIGHC__) && !defined(sgi) && !defined(__SUNPRO_C)) || \
    defined(__USLC__)
# pragma asm partial_optimization outl
# pragma asm partial_optimization outw
# pragma asm partial_optimization outb
# pragma asm partial_optimization inl
# pragma asm partial_optimization inw
# pragma asm partial_optimization inb
# endif
# endif
# define ldq_u(p) (*((unsigned long *)(p)))
# define ldl_u(p) (*((unsigned int *)(p)))
# define ldw_u(p) (*((unsigned short *)(p)))
# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
# define mem_barrier()  /* NOP */
# define write_mem_barrier()  /* NOP */
# endif /* __GNUC__ */

# if defined(QNX4)
# include <sys/types.h>
extern unsigned inb(unsigned port);
extern unsigned inw(unsigned port);
extern unsigned inl(unsigned port);
extern void outb(unsigned port, unsigned val);
extern void outw(unsigned port, unsigned val);
extern void outl(unsigned port, unsigned val);
# endif /* QNX4 */

# if defined(IODEBUG) && defined(__GNUC__)
# undef inb
# undef inw
# undef inl
# undef outb
# undef outw
# undef outl
# define inb(a) __extension__ ({unsigned char __c=RealInb(a); ErrorF("inb(0x%03x) = 0x%02x\t@ line %4d, file %s\n", a, __c, __LINE__, __FILE__);__c;})
# define inw(a) __extension__ ({unsigned short __c=RealInw(a); ErrorF("inw(0x%03x) = 0x%04x\t@ line %4d, file %s\n", a, __c, __LINE__, __FILE__);__c;})
# define inl(a) __extension__ ({unsigned int __c=RealInl(a); ErrorF("inl(0x%03x) = 0x%08x\t@ line %4d, file %s\n", a, __c, __LINE__, __FILE__);__c;})

# define outb(a,b) (ErrorF("outb(0x%03x, 0x%02x)\t@ line %4d, file %s\n", a, b, __LINE__, __FILE__),RealOutb(a,b))
# define outw(a,b) (ErrorF("outw(0x%03x, 0x%04x)\t@ line %4d, file %s\n", a, b, __LINE__, __FILE__),RealOutw(a,b))
# define outl(a,b) (ErrorF("outl(0x%03x, 0x%08x)\t@ line %4d, file %s\n", a, b, __LINE__, __FILE__),RealOutl(a,b))
# endif

# endif /* NO_INLINE */

# ifdef __alpha__
/* entry points for Mmio memory access routines */
extern int (*xf86ReadMmio8)(void *, unsigned long);
extern int (*xf86ReadMmio16)(void *, unsigned long);
# ifndef STANDALONE_MMIO
extern int (*xf86ReadMmio32)(void *, unsigned long);
# else
/* Some DRI 3D drivers need MMIO_IN32. */
static __inline__ int
xf86ReadMmio32(void *Base, unsigned long Offset)
{
    __asm__ __volatile__("mb" : : : "memory");
    return *(volatile unsigned int*)((unsigned long)Base+(Offset));
}
# endif
extern void (*xf86WriteMmio8)(int, void *, unsigned long);
extern void (*xf86WriteMmio16)(int, void *, unsigned long);
extern void (*xf86WriteMmio32)(int, void *, unsigned long);
extern void (*xf86WriteMmioNB8)(int, void *, unsigned long);
extern void (*xf86WriteMmioNB16)(int, void *, unsigned long);
extern void (*xf86WriteMmioNB32)(int, void *, unsigned long);
extern void xf86JensenMemToBus(char *, long, long, int);
extern void xf86JensenBusToMem(char *, char *, unsigned long, int);
extern void xf86SlowBCopyFromBus(unsigned char *, unsigned char *, int);
extern void xf86SlowBCopyToBus(unsigned char *, unsigned char *, int);

/* Some macros to hide the system dependencies for MMIO accesses */
/* Changed to kill noise generated by gcc's -Wcast-align */
# define MMIO_IN8(base, offset) (*xf86ReadMmio8)(base, offset)
# define MMIO_IN16(base, offset) (*xf86ReadMmio16)(base, offset)
# ifndef STANDALONE_MMIO
# define MMIO_IN32(base, offset) (*xf86ReadMmio32)(base, offset)
# else
# define MMIO_IN32(base, offset) xf86ReadMmio32(base, offset)
# endif

# if defined (JENSEN_SUPPORT)
# define MMIO_OUT32(base, offset, val) \
    (*xf86WriteMmio32)((CARD32)(val), base, offset)
# define MMIO_ONB32(base, offset, val) \
    (*xf86WriteMmioNB32)((CARD32)(val), base, offset)
# else
# define MMIO_OUT32(base, offset, val) \
    do { \
        write_mem_barrier(); \
        *(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset)) = (val); \
    } while (0)
# define MMIO_ONB32(base, offset, val) \
    *(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset)) = (val)
# endif

# define MMIO_OUT8(base, offset, val) \
    (*xf86WriteMmio8)((CARD8)(val), base, offset)
# define MMIO_OUT16(base, offset, val) \
    (*xf86WriteMmio16)((CARD16)(val), base, offset)
# define MMIO_ONB8(base, offset, val) \
    (*xf86WriteMmioNB8)((CARD8)(val), base, offset)
# define MMIO_ONB16(base, offset, val) \
    (*xf86WriteMmioNB16)((CARD16)(val), base, offset)
# define MMIO_MOVE32(base, offset, val) \
    MMIO_OUT32(base, offset, val)

# elif defined(__powerpc__)
 /*
  * we provide byteswapping and no byteswapping functions here
  * with byteswapping as default,
  * drivers that don't need byteswapping should define PPC_MMIO_IS_BE
  */
# define MMIO_IN8(base, offset) xf86ReadMmio8(base, offset)
# define MMIO_OUT8(base, offset, val) \
    xf86WriteMmio8(base, offset, (CARD8)(val))
# define MMIO_ONB8(base, offset, val) \
    xf86WriteMmioNB8(base, offset, (CARD8)(val))

# if defined(PPC_MMIO_IS_BE) /* No byteswapping */
# define MMIO_IN16(base, offset) xf86ReadMmio16Be(base, offset)
# define MMIO_IN32(base, offset) xf86ReadMmio32Be(base, offset)
# define MMIO_OUT16(base, offset, val) \
    xf86WriteMmio16Be(base, offset, (CARD16)(val))
# define MMIO_OUT32(base, offset, val) \
    xf86WriteMmio32Be(base, offset, (CARD32)(val))
# define MMIO_ONB16(base, offset, val) \
    xf86WriteMmioNB16Be(base, offset, (CARD16)(val))
# define MMIO_ONB32(base, offset, val) \
    xf86WriteMmioNB32Be(base, offset, (CARD32)(val))
# else /* byteswapping is the default */
# define MMIO_IN16(base, offset) xf86ReadMmio16Le(base, offset)
# define MMIO_IN32(base, offset) xf86ReadMmio32Le(base, offset)
# define MMIO_OUT16(base, offset, val) \
    xf86WriteMmio16Le(base, offset, (CARD16)(val))
# define MMIO_OUT32(base, offset, val) \
    xf86WriteMmio32Le(base, offset, (CARD32)(val))
# define MMIO_ONB16(base, offset, val) \
    xf86WriteMmioNB16Le(base, offset, (CARD16)(val))
# define MMIO_ONB32(base, offset, val) \
    xf86WriteMmioNB32Le(base, offset, (CARD32)(val))
# endif

# define MMIO_MOVE32(base, offset, val) \
    xf86WriteMmio32Be(base, offset, (CARD32)(val))

static __inline__ void ppc_flush_icache(char *addr)
{
    __asm__ volatile (
        "dcbf 0,%0;"
        "sync;"
        "icbi 0,%0;"
        "sync;"
        "isync;"
        : : "r"(addr) : "memory");
}

# elif defined(__sparc__) || defined(sparc) || defined(__sparc)
 /*
  * Like powerpc, we provide byteswapping and no byteswapping functions
  * here with byteswapping as default, drivers that don't need byteswapping
  * should define SPARC_MMIO_IS_BE (perhaps create a generic macro so that we
  * do not need to use PPC_MMIO_IS_BE and the sparc one in all the same places
  * of drivers?).
  */
# define MMIO_IN8(base, offset) xf86ReadMmio8(base, offset)
# define MMIO_OUT8(base, offset, val) \
    xf86WriteMmio8(base, offset, (CARD8)(val))
# define MMIO_ONB8(base, offset, val) \
    xf86WriteMmio8NB(base, offset, (CARD8)(val))

# if defined(SPARC_MMIO_IS_BE) /* No byteswapping */
# define MMIO_IN16(base, offset) xf86ReadMmio16Be(base, offset)
# define MMIO_IN32(base, offset) xf86ReadMmio32Be(base, offset)
# define MMIO_OUT16(base, offset, val) \
    xf86WriteMmio16Be(base, offset, (CARD16)(val))
# define MMIO_OUT32(base, offset, val) \
    xf86WriteMmio32Be(base, offset, (CARD32)(val))
# define MMIO_ONB16(base, offset, val) \
    xf86WriteMmio16BeNB(base, offset, (CARD16)(val))
# define MMIO_ONB32(base, offset, val) \
    xf86WriteMmio32BeNB(base, offset, (CARD32)(val))
# else /* byteswapping is the default */
# define MMIO_IN16(base, offset) xf86ReadMmio16Le(base, offset)
# define MMIO_IN32(base, offset) xf86ReadMmio32Le(base, offset)
# define MMIO_OUT16(base, offset, val) \
    xf86WriteMmio16Le(base, offset, (CARD16)(val))
# define MMIO_OUT32(base, offset, val) \
    xf86WriteMmio32Le(base, offset, (CARD32)(val))
# define MMIO_ONB16(base, offset, val) \
    xf86WriteMmio16LeNB(base, offset, (CARD16)(val))
# define MMIO_ONB32(base, offset, val) \
    xf86WriteMmio32LeNB(base, offset, (CARD32)(val))
# endif

# define MMIO_MOVE32(base, offset, val) \
    xf86WriteMmio32Be(base, offset, (CARD32)(val))

# else /* !__alpha__ && !__powerpc__ && !__sparc__ */

# define MMIO_IN8(base, offset) \
    *(volatile CARD8 *)(((CARD8*)(base)) + (offset))
# define MMIO_IN16(base, offset) \
    *(volatile CARD16 *)(void *)(((CARD8*)(base)) + (offset))
# define MMIO_IN32(base, offset) \
    *(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset))
# define MMIO_OUT8(base, offset, val) \
    *(volatile CARD8 *)(((CARD8*)(base)) + (offset)) = (val)
# define MMIO_OUT16(base, offset, val) \
    *(volatile CARD16 *)(void *)(((CARD8*)(base)) + (offset)) = (val)
# define MMIO_OUT32(base, offset, val) \
    *(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset)) = (val)
# define MMIO_ONB8(base, offset, val) MMIO_OUT8(base, offset, val)
# define MMIO_ONB16(base, offset, val) MMIO_OUT16(base, offset, val)
# define MMIO_ONB32(base, offset, val) MMIO_OUT32(base, offset, val)

# define MMIO_MOVE32(base, offset, val) MMIO_OUT32(base, offset, val)

# endif /* __alpha__ */

/*
 * With Intel, the version in os-support/misc/SlowBcopy.s is used.
 * This avoids port I/O during the copy (which causes problems with
 * some hardware).
 */
# ifdef __alpha__
# define slowbcopy_tobus(src,dst,count) xf86SlowBCopyToBus(src,dst,count)
# define slowbcopy_frombus(src,dst,count) xf86SlowBCopyFromBus(src,dst,count)
# else /* __alpha__ */
# define slowbcopy_tobus(src,dst,count) xf86SlowBcopy(src,dst,count)
# define slowbcopy_frombus(src,dst,count) xf86SlowBcopy(src,dst,count)
# endif /* __alpha__ */

#endif /* _COMPILER_H */
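/*
 * Minimal usage sketch (illustrative only; "regs" and REG_STATUS are
 * hypothetical driver names, not part of this header):
 *
 *     volatile void *regs = ... ;                     mapped MMIO window
 *     CARD32 status = MMIO_IN32(regs, REG_STATUS);
 *     MMIO_OUT32(regs, REG_STATUS, status | 1);
 *     mem_barrier();
 */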