/*	$NetBSD: intr.h,v 1.8 1997/11/07 07:33:18 scottr Exp $	*/

/*
 * Copyright (C) 1997 Scott Reynolds
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MAC68K_INTR_H_
#define _MAC68K_INTR_H_

#ifdef _KERNEL
/*
 * spl functions; all but spl0 are done in-line
 */

/*
 * _spl(s): unconditionally load `s' into the status register (may raise
 * or lower the interrupt priority level).  Evaluates to the previous SR
 * contents so the caller can later restore them with splx().
 */
#define	_spl(s) \
({ \
	register int _spl_r; \
\
	__asm __volatile ("clrl %0; movew sr,%0; movew %1,sr" : \
	    "&=d" (_spl_r) : "di" (s)); \
	_spl_r; \
})

/*
 * _splraise(s): load `s' into the status register only if its IPL field
 * (bits 0x700) is above the current one — never lowers the priority.
 * Evaluates to the previous SR contents.  Clobbers d0/d1 (declared).
 */
#define	_splraise(s) \
({ \
	int _spl_r; \
\
	__asm __volatile (" \
		clrl	d0		; \
		movw	sr,d0		; \
		movl	d0,%0		; \
		andw	#0x700,d0	; \
		movw	%1,d1		; \
		andw	#0x700,d1	; \
		cmpw	d0,d1		; \
		jle	1f		; \
		movw	%1,sr		; \
	    1:" : \
	    "&=d" (_spl_r) : \
	    "di" (s) : \
	    "d0", "d1"); \
	_spl_r; \
})

/* spl0 requires checking for software interrupts */
#define	spl1()	_spl(PSL_S|PSL_IPL1)
#define	spl2()	_spl(PSL_S|PSL_IPL2)
#define	spl3()	_spl(PSL_S|PSL_IPL3)
#define	spl4()	_spl(PSL_S|PSL_IPL4)
#define	spl5()	_spl(PSL_S|PSL_IPL5)
#define	spl6()	_spl(PSL_S|PSL_IPL6)
#define	spl7()	_spl(PSL_S|PSL_IPL7)

/* These spl calls are _not_ to be used by machine-independent code. */
#define	spladb()	splhigh()
#define	splzs()		splserial()
#define	splsoft()	spl1()

/*
 * splnet must block hardware network interrupts
 * splimp must be > spltty
 *
 * The actual IPL values are variables, presumably initialized per-model
 * elsewhere in the port (not visible in this header) — verify against
 * machdep/locore before relying on specific levels.
 */
extern unsigned short	mac68k_ttyipl;
extern unsigned short	mac68k_bioipl;
extern unsigned short	mac68k_netipl;
extern unsigned short	mac68k_impipl;
extern unsigned short	mac68k_clockipl;
extern unsigned short	mac68k_statclockipl;
extern unsigned short	mac68k_schedipl;

/*
 * These should be used for:
 * 1) ensuring mutual exclusion (why use processor level?)
 * 2) allowing faster devices to take priority
 *
 * Note that on the Mac, most things are masked at spl1, almost
 * everything at spl2, and everything but the panic switch and
 * power at spl4.
 */
#define	splsoftclock()	splsoft()
#define	splsoftnet()	splsoft()
#define	spltty()	_splraise(mac68k_ttyipl)
#define	splbio()	_splraise(mac68k_bioipl)
#define	splnet()	_splraise(mac68k_netipl)
#define	splimp()	_splraise(mac68k_impipl)
#define	splclock()	_splraise(mac68k_clockipl)
#define	splstatclock()	_splraise(mac68k_statclockipl)
/* XXX was _splsched(), a macro defined nowhere; use _splraise like the rest */
#define	splsched()	_splraise(mac68k_schedipl)
#define	splserial()	spl4()
#define	splhigh()	spl7()

/* watch out for side effects */
#define	splx(s)		((s) & PSL_IPL ? _spl(s) : spl0())

/*
 * simulated software interrupt register
 */
extern volatile u_int8_t ssir;

#define	SIR_NET		0x01
#define	SIR_CLOCK	0x02
#define	SIR_SERIAL	0x04
#define	SIR_DTMGR	0x08
#define	SIR_ADB		0x10

/* Set/clear bits in the soft interrupt register; _ssir is the asm-level
 * symbol for `ssir' above. */
#define	siron(mask)	\
	__asm __volatile ( "orb %0,_ssir" : : "i" (mask))
/* XXX dropped the stray trailing `;' the old definition carried — it
 * double-terminated statements at use sites (breaks unbraced if/else). */
#define	siroff(mask)	\
	__asm __volatile ( "andb %0,_ssir" : : "ir" (~(mask)))

#define	setsoftnet()	siron(SIR_NET)
#define	setsoftclock()	siron(SIR_CLOCK)
#define	setsoftserial()	siron(SIR_SERIAL)
#define	setsoftdtmgr()	siron(SIR_DTMGR)
#define	setsoftadb()	siron(SIR_ADB)

/* locore.s */
int	spl0 __P((void));
#endif /* _KERNEL */

#endif /* _MAC68K_INTR_H_ */