/*	$Id: rmixl_fmnvar.h,v 1.1.2.7 2012/01/19 09:59:08 matt Exp $	*/

/*-
 * Copyright (c) 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Cliff Neighbors.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _ARCH_MIPS_RMIXL_RMIXL_FMNVAR_H_
#define _ARCH_MIPS_RMIXL_RMIXL_FMNVAR_H_

#include <sys/cpu.h>
#include <mips/cpuregs.h>

#define RMIXL_FMN_CODE_PSB_WAKEUP	200	/* firmware MSGRNG_CODE_BOOT_WAKEUP */
#define RMIXL_FMN_CODE_HELLO_REQ	201
#define RMIXL_FMN_CODE_HELLO_ACK	202

#define RMIXL_FMN_HELLO_REQ_SZ		4
#define RMIXL_FMN_HELLO_ACK_SZ		4

typedef struct rmixl_fmn_msg {
	uint64_t data[4];
} rmixl_fmn_msg_t;

typedef struct rmixl_fmn_rxmsg {
	uint16_t rxsid;
	u_int code;
	uint8_t size;
	rmixl_fmn_msg_t msg;
} rmixl_fmn_rxmsg_t;

/*
 * compute FMN dest_id from MIPS cpuid
 * - each core's FMN station has 8 buckets
 * - each core has 4 threads
 * - here we use 1 bucket per thread (the first four buckets)
 * - if we ever need { hi, lo } priority buckets per thread,
 *   the RMIXL_FMN_DESTID macro must be adjusted to use the
 *   'pri' parameter
 * - i.e. for now there is only one priority
 */
#define RMIXL_CPU_CORE(cpuid)	((uint32_t)__SHIFTOUT((cpuid), __BITS(7,3)))
#define RMIXL_CPU_THREAD(cpuid)	((uint32_t)__SHIFTOUT((cpuid), __BITS(1,0)))
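
/*
 * Illustrative only: a minimal sketch of the dest_id computation that the
 * comment above describes, assuming one bucket per thread in the first
 * four buckets of each core's station.  The real RMIXL_FMN_DESTID macro
 * lives elsewhere in the port; the name and layout below are assumptions.
 */
#if 0	/* example sketch, not part of the real header */
#define RMIXL_FMN_DESTID_EXAMPLE(cpuid, pri)				\
	((RMIXL_CPU_CORE(cpuid) << 3) | RMIXL_CPU_THREAD(cpuid))
	/* 8 buckets per core station; 'pri' is unused while there is	*/
	/* only one priority, per the comment above			*/
#endif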

/* read a 64-bit coprocessor 2 register (dmfc2) */
static inline uint64_t
mips_dmfc2(const u_int regnum, const u_int sel)
{
	uint64_t __val;

	__asm volatile(
		".set push"		"\n\t"
		".set mips64"		"\n\t"
		".set noat"		"\n\t"
		"dmfc2 %0,$%1,%2"	"\n\t"
		".set pop"		"\n\t"
			: "=r"(__val) : "n"(regnum), "n"(sel));

	return __val;
}

/* write a 64-bit coprocessor 2 register (dmtc2) */
static inline void
mips_dmtc2(u_int regnum, u_int sel, uint64_t val)
{
	__asm volatile(
		".set push"		"\n\t"
		".set mips64"		"\n\t"
		".set noat"		"\n\t"
		"dmtc2 %0,$%1,%2"	"\n\t"
		".set pop"		"\n\t"
			:: "r"(val), "n"(regnum), "n"(sel));
}

/* read a 32-bit coprocessor 2 register (mfc2) */
static inline uint32_t
mips_mfc2(const u_int regnum, const u_int sel)
{
	uint32_t __val;

	__asm volatile(
		".set push"		"\n\t"
		".set mips32"		"\n\t"
		"mfc2 %0,$%1,%2"	"\n\t"
		".set pop"		"\n\t"
			: "=r"(__val) : "n"(regnum), "n"(sel));

	return __val;
}

/* write a 32-bit coprocessor 2 register (mtc2) */
static inline void
mips_mtc2(u_int regnum, u_int sel, uint32_t val)
{
	__asm volatile(
		".set push"		"\n\t"
		".set mips32"		"\n\t"
		".set noat"		"\n\t"
		"mtc2 %0,$%1,%2"	"\n\t"
		".set pop"		"\n\t"
			:: "r"(val), "n"(regnum), "n"(sel));
}

#define COP2_PRINT_8(regno, sel)					\
	do {								\
		printf("%s: COP2(%d,%d) = %#"PRIx64"\n",		\
		    __func__, regno, sel, mips_dmfc2(regno, sel));	\
	} while (0)

#define COP2_PRINT_4(regno, sel)					\
	do {								\
		printf("%s: COP2(%d,%d) = %#"PRIx32"\n",		\
		    __func__, regno, sel, mips_mfc2(regno, sel));	\
	} while (0)

/*
 * encode 'dest' for msgsnd op 'rt'
 */
#define RMIXL_MSGSND_DESC(size, code, dest_id)				\
		( __SHIFTIN((dest_id), __BITS(7,0))			\
		| __SHIFTIN((code), __BITS(15,8))			\
		| __SHIFTIN((size)-1, __BITS(17,16)))
#define RMIXLP_MSGSND_DESC(size, code, dest_id, dest_vc)		\
		( __SHIFTIN((dest_id), __BITS(11,0))			\
		| __SHIFTIN((size)-1, __BITS(17,16))			\
		| __SHIFTIN((dest_vc), __BITS(20,19))			\
		| __SHIFTIN((code), __BITS(31,24)))
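
/*
 * Example (illustrative only, not part of the real API): building a
 * descriptor for the 4-word HELLO_REQ message defined above.  The
 * destination id used here (5) is an arbitrary value for the sketch.
 */
#if 0	/* example sketch */
	uint32_t desc = RMIXL_MSGSND_DESC(RMIXL_FMN_HELLO_REQ_SZ,
	    RMIXL_FMN_CODE_HELLO_REQ, 5);
	/* size-1 lands in bits 17..16, code in bits 15..8, dest_id in 7..0 */
#endif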

/* send a message on the XLR/XLS fast messaging network */
static inline void
rmixl_msgsnd(uint32_t desc)
{
	__asm__ volatile (
		".set push"		"\n\t"
		".set noreorder"	"\n\t"
		".set arch=xlr"		"\n\t"
		"sync"			"\n\t"
		"msgsnd %0"		"\n\t"
		".set pop"		"\n\t"
			:: "r"(desc));
}

/* send a message on the XLP fast messaging network; returns status */
static inline uint32_t
rmixlp_msgsnd(uint32_t desc)
{
	uint32_t rv;

	__asm__ volatile (
		".set push"		"\n\t"
		".set noreorder"	"\n\t"
		".set arch=xlp"		"\n\t"
		"sync"			"\n\t"
		"msgsnds %[desc],%[rv]"	"\n\t"
		".set pop"		"\n\t"
			: [rv] "=r" (rv)
			: [desc] "r" (desc));

	return rv;
}

/* load a received message from the given bucket (XLR/XLS) */
static inline void
rmixl_msgld(uint32_t bucket)
{
	__asm__ volatile (
		".set push"		"\n\t"
		".set noreorder"	"\n\t"
		".set arch=xlr"		"\n\t"
		"msgld %0"		"\n\t"
		".set pop"		"\n\t"
			:: "r"(bucket));
}

/* load a received message from the given receive queue (XLP); returns status */
static inline uint32_t
rmixlp_msgld(uint32_t rxq)
{
	uint32_t rv;

	__asm__ volatile (
		".set push"		"\n\t"
		".set noreorder"	"\n\t"
		".set arch=xlp"		"\n\t"
		"msglds %[rxq],%[rv]"	"\n\t"
		".set pop"		"\n\t"
			: [rv] "=r"(rv)
			: [rxq] "r"(rxq));

	return rv;
}

/*
 * the seemingly-spurious add is recommended by RMI;
 * see XLS PRM (rev. 3.21) section 5.3.9
 */
static inline void
rmixl_msgwait(u_int mask)
{
	__asm__ volatile (
		".set push"		"\n\t"
		".set noreorder"	"\n\t"
		".set arch=xlr"		"\n\t"
		"daddu %0,%0,0"		"\n\t"
		"msgwait %0"		"\n\t"
		".set pop"		"\n\t"
			:: "r"(mask));
}
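
/*
 * Illustrative receive-side sketch (not part of the real API): a polling
 * consumer would typically block in rmixl_msgwait() on a mask of the
 * buckets it cares about, then pull the pending message with rmixl_msgld().
 * The bucket numbers here are arbitrary; unloading the message words and
 * status from the COP2 registers is chip-specific and is handled by
 * rmixl_fmn_msg_recv()/rmixl_fmn_intr_poll() declared below.
 */
#if 0	/* example sketch */
	rmixl_msgwait(1 << 0);		/* wait for traffic on bucket 0 */
	rmixl_msgld(0);			/* start unloading from bucket 0 */
#endif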

/*
 * enable COP2 (the FMN unit) in the CP0 status register;
 * returns the previous state of the CU2 bit for rmixl_cp2_restore()
 */
static inline uint32_t
rmixl_cp2_enable(void)
{
	uint32_t rv;
	uint32_t sr;

	KASSERT(curcpu()->ci_cpl == IPL_HIGH);
	__asm volatile(
		".set push"			"\n\t"
		".set noreorder"		"\n\t"
		".set noat"			"\n\t"
		"mfc0 %[sr],$%[c0_status]"	"\n\t"
		"and %[rv],%[sr],%[mask]"	"\n\t"
		"or %[sr],%[mask]"		"\n\t"
		"mtc0 %[sr],$%[c0_status]"	"\n\t"
		".set pop"			"\n\t"
			: [rv] "=r" (rv),
			  [sr] "=r" (sr)
			: [c0_status] "n" (MIPS_COP_0_STATUS),
			  [mask] "r" (MIPS_SR_COP_2_BIT));

	return rv;
}

/* restore the CU2 bit to the state returned by rmixl_cp2_enable() */
static inline void
rmixl_cp2_restore(uint32_t ocu)
{
	uint32_t cu2;

	KASSERT(curcpu()->ci_cpl == IPL_HIGH);
	__asm volatile(
		".set push"			"\n\t"
		".set noreorder"		"\n\t"
		".set noat"			"\n\t"
		"mfc0 %[sr],$%[c0_status]"	"\n\t"
		"and %[sr],%[mask]"		"\n\t"
		"or %[sr],%[ocu]"		"\n\t"
		"mtc0 %[sr],$%[c0_status]"	"\n\t"
		".set pop"			"\n\t"
			: [sr] "=r"(cu2)
			: [c0_status] "n" (MIPS_COP_0_STATUS),
			  [mask] "r" (~MIPS_SR_COP_2_BIT),
			  [ocu] "r" (ocu));
}
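
/*
 * Illustrative send-side sketch (not part of the real API): the primitives
 * above are typically combined roughly as shown to push a message out on
 * the FMN.  The COP2 register/select numbers used for the transmit buffer
 * are placeholders; the actual register assignments are chip-specific and
 * live in the FMN implementation, not in this header.
 */
#if 0	/* example sketch */
	static void
	fmn_send_example(u_int dest_id, const rmixl_fmn_msg_t *msg)
	{
		uint32_t ocu, desc;

		ocu = rmixl_cp2_enable();	/* turn on COP2 access */
		mips_dmtc2(0, 0, msg->data[0]);	/* TX buffer reg/sel are placeholders */
		mips_dmtc2(0, 1, msg->data[1]);
		mips_dmtc2(0, 2, msg->data[2]);
		mips_dmtc2(0, 3, msg->data[3]);
		desc = RMIXL_MSGSND_DESC(4, RMIXL_FMN_CODE_HELLO_REQ, dest_id);
		rmixl_msgsnd(desc);		/* issue the msgsnd op */
		rmixl_cp2_restore(ocu);		/* put CU2 back as it was */
	}
#endif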

#ifdef MIPS64_XLP
/*
 * logical station IDs for RMI XLP
 */
#define RMIXLP_FMN_STID_RESERVED	0
#define RMIXLP_FMN_STID_CPU		1
#define RMIXLP_FMN_STID_POPQ		2
#define RMIXLP_FMN_STID_PCIE0		3
#define RMIXLP_FMN_STID_PCIE1		4
#define RMIXLP_FMN_STID_PCIE2		5
#define RMIXLP_FMN_STID_PCIE3		6
#define RMIXLP_FMN_STID_DMA		7
#define RMIXLP_FMN_STID_PKE		8
#define RMIXLP_FMN_STID_SAE		9
#define RMIXLP_FMN_STID_CDE		10
#define RMIXLP_FMN_STID_POE		11
#define RMIXLP_FMN_STID_NAE		12	/* NAE Egress */
#define RMIXLP_FMN_STID_RXE		13
#define RMIXLP_FMN_STID_SRIO		14
#define RMIXLP_FMN_STID_FMN		15
#define RMIXLP_FMN_STID_NAE_FREEIN	16
#define RMIXLP_FMN_NSTID		17
#else
#define RMIXLP_FMN_NSTID		0
#endif

#ifdef MIPS64_XLR
/*
 * logical station IDs for RMI XLR
 * see Table 13.2 "Addressable Buckets" in the XLR PRM
 */
#define RMIXLR_FMN_STID_RESERVED	0
#define RMIXLR_FMN_STID_CORE0		1
#define RMIXLR_FMN_STID_CORE1		2
#define RMIXLR_FMN_STID_CORE2		3
#define RMIXLR_FMN_STID_CORE3		4
#define RMIXLR_FMN_STID_CORE4		5
#define RMIXLR_FMN_STID_CORE5		6
#define RMIXLR_FMN_STID_CORE6		7
#define RMIXLR_FMN_STID_CORE7		8
#define RMIXLR_FMN_STID_TXRX_0		9
#define RMIXLR_FMN_STID_TXRX_1		10
#define RMIXLR_FMN_STID_RGMII		11
#define RMIXLR_FMN_STID_DMA		12
#define RMIXLR_FMN_STID_FREE_0		13
#define RMIXLR_FMN_STID_FREE_1		14
#define RMIXLR_FMN_STID_SAE		15
#define RMIXLR_FMN_NSTID		(RMIXLR_FMN_STID_SAE+1)
#else
#define RMIXLR_FMN_NSTID		0
#endif

#ifdef MIPS64_XLS
/*
 * logical station IDs for RMI XLS
 * see Table 12.1 "Stations and Addressable Buckets ..." in the XLS PRM
 */
#define RMIXLS_FMN_STID_RESERVED	0
#define RMIXLS_FMN_STID_CORE0		1
#define RMIXLS_FMN_STID_CORE1		2
#define RMIXLS_FMN_STID_CORE2		3
#define RMIXLS_FMN_STID_CORE3		4
#define RMIXLS_FMN_STID_GMAC_Q0		5
#define RMIXLS_FMN_STID_GMAC_Q1		6
#define RMIXLS_FMN_STID_DMA		7
#define RMIXLS_FMN_STID_CDE		8
#define RMIXLS_FMN_STID_PCIE		9
#define RMIXLS_FMN_STID_SAE		10
#define RMIXLS_FMN_NSTID		(RMIXLS_FMN_STID_SAE+1)
#else
#define RMIXLS_FMN_NSTID		0
#endif

#define RMIXL_FMN_NSTID \
	MAX(MAX(RMIXLR_FMN_NSTID, RMIXLS_FMN_NSTID), RMIXLP_FMN_NSTID)

typedef int (*rmixl_fmn_intr_handler_t)(void *, rmixl_fmn_rxmsg_t *);

void	rmixl_fmn_cpu_attach(struct cpu_info *ci);
void	rmixl_fmn_init(void);
void	rmixl_fmn_init_thread(void);
void *	rmixl_fmn_intr_establish(size_t, rmixl_fmn_intr_handler_t, void *);
void	rmixl_fmn_intr_disestablish(void *);
void	rmixl_fmn_intr_poll(u_int, rmixl_fmn_rxmsg_t *);

size_t	rmixl_fmn_qid_to_stid(size_t);
const char *
	rmixl_fmn_stid_name(size_t);

/*
 * true == success, false == failure
 */
bool	rmixl_fmn_msg_send(u_int, u_int, u_int, u_int, const rmixl_fmn_msg_t *);
bool	rmixl_fmn_msg_recv(u_int, rmixl_fmn_rxmsg_t *);
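
/*
 * Illustrative consumer sketch (assumptions flagged): a driver would
 * typically register a handler for its station with
 * rmixl_fmn_intr_establish() and tear it down with
 * rmixl_fmn_intr_disestablish().  The station id argument and the
 * handler's return-value convention are not documented in this header;
 * the values below are placeholders only.
 */
#if 0	/* example sketch */
	static int
	my_fmn_handler(void *arg, rmixl_fmn_rxmsg_t *rxmsg)
	{
		/* rxmsg->code, rxmsg->size, rxmsg->msg describe the rx message */
		return 1;	/* assumption: non-zero means "handled" */
	}

	void *ih = rmixl_fmn_intr_establish(RMIXLS_FMN_STID_CORE0,
	    my_fmn_handler, NULL);
	rmixl_fmn_intr_disestablish(ih);
#endif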

#endif	/* _ARCH_MIPS_RMIXL_RMIXL_FMNVAR_H_ */