/*	$NetBSD: rmixlvar.h,v 1.1.2.26 2012/01/19 08:03:22 matt Exp $	*/

/*
 * Copyright 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _MIPS_RMI_RMIXLVAR_H_
#define _MIPS_RMI_RMIXLVAR_H_

#include <sys/bus.h>
#include <sys/extent.h>

#include <dev/pci/pcivar.h>

#include <mips/cpu.h>
#include <mips/locore.h>

#include <mips/rmi/rmixl_firmware.h>
#include <mips/rmi/rmixlreg.h>

void	rmixl_pcr_init_core(bool);

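/*
 * Predicates for identifying the RMI chip family (XLR, XLS, XLP) from
 * the PRID table entry.  When the kernel is configured for exactly one
 * family, the corresponding predicate reduces to a compile-time constant.
 */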
static inline int
cpu_rmixl_chip_type(const struct pridtab *ct)
{
	return ct->cpu_cidflags & MIPS_CIDFL_RMI_TYPE;
}

static inline bool
cpu_rmixl(const struct pridtab *ct)
{
	return ct->cpu_cid == MIPS_PRID_CID_RMI;
}

static inline bool
cpu_rmixlr(const struct pridtab *ct)
{
#ifdef MIPS64_XLR
#if (MIPS64_XLS + MIPS64_XLP) == 0
	return true;
#else
	return cpu_rmixl(ct) && cpu_rmixl_chip_type(ct) == CIDFL_RMI_TYPE_XLR;
#endif
#else
	return false;
#endif
}

static inline bool
cpu_rmixls(const struct pridtab *ct)
{
#ifdef MIPS64_XLS
#if (MIPS64_XLR + MIPS64_XLP) == 0
	return true;
#else
	return cpu_rmixl(ct) && cpu_rmixl_chip_type(ct) == CIDFL_RMI_TYPE_XLS;
#endif
#else
	return false;
#endif
}

static inline bool
cpu_rmixlp(const struct pridtab *ct)
{
#ifdef MIPS64_XLP
#if (MIPS64_XLR + MIPS64_XLS) == 0
	return true;
#else
	return cpu_rmixl(ct) && cpu_rmixl_chip_type(ct) == CIDFL_RMI_TYPE_XLP;
#endif
#else
	return false;
#endif
}

typedef enum {
	PSB_TYPE_UNKNOWN=0,
	PSB_TYPE_RMI,
	PSB_TYPE_DELL,
} rmixlfw_psb_type_t;

static inline const char *
rmixlfw_psb_type_name(rmixlfw_psb_type_t type)
{
	switch (type) {
	case PSB_TYPE_UNKNOWN:
		return "unknown";
	case PSB_TYPE_RMI:
		return "RMI";
	case PSB_TYPE_DELL:
		return "DELL";
	default:
		return "undefined";
	}
}

typedef enum {
	RMIXLP_8XX,
	RMIXLP_4XX,
	/* These next 4 need to be in this order */
	RMIXLP_3XX,
	RMIXLP_3XXL,
	RMIXLP_3XXH,
	RMIXLP_3XXQ,
	RMIXLP_ANY,		/* must be last */
} rmixlp_variant_t;

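/*
 * Tests against the probed XLP variant in rmixl_configuration; these
 * depend on the enum ordering above (8xx/4xx vs. the 3xx family).
 */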
#define	RMIXLP_8XX_P	(RMIXLP_8XX <= rmixl_configuration.rc_xlp_variant \
			 && rmixl_configuration.rc_xlp_variant <= RMIXLP_4XX)
#define	RMIXLP_3XX_P	(RMIXLP_3XX <= rmixl_configuration.rc_xlp_variant \
			 && rmixl_configuration.rc_xlp_variant <= RMIXLP_3XXQ)

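/* A physical address window: base address and size. */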
struct rmixl_region {
	bus_addr_t	r_pbase;
	bus_size_t	r_size;
};

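/*
 * Per-chip/board configuration state; the single global instance is
 * rmixl_configuration, declared below.
 */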
struct rmixl_config {
	struct rmixl_region rc_io;
	struct rmixl_region rc_flash[RMIXLP_SBC_NFLASH];	/* FLASH_BAR */
	struct rmixl_region rc_pci_cfg;
	struct rmixl_region rc_pci_ecfg;
	struct rmixl_region rc_pci_mem;
	struct rmixl_region rc_pci_io;
	struct rmixl_region rc_pci_link_mem[RMIXLP_SBC_NPCIE_MEM];
	struct rmixl_region rc_pci_link_io[RMIXLP_SBC_NPCIE_IO];
	struct rmixl_region rc_srio_mem;
	struct rmixl_region rc_norflash[RMIXLP_NOR_NCS];	/* XLP */
	struct mips_bus_space	rc_obio_eb_memt;	/* DEVIO -eb */
	struct mips_bus_space	rc_obio_el_memt;	/* DEVIO -el */
	struct mips_bus_space	rc_iobus_memt;		/* Peripherals IO Bus */
	struct mips_bus_space	rc_pci_cfg_memt;	/* PCI CFG */
	struct mips_bus_space	rc_pci_ecfg_eb_memt;	/* PCI ECFG */
	struct mips_bus_space	rc_pci_ecfg_el_memt;	/* PCI ECFG */
	struct mips_bus_space	rc_pci_memt;		/* PCI MEM */
	struct mips_bus_space	rc_pci_iot;		/* PCI IO */
	struct mips_bus_space	rc_srio_memt;		/* SRIO MEM */
	struct mips_bus_dma_tag	rc_dma_tag;
	struct mips_pci_chipset	rc_pci_chipset;		/* pci_chipset_t */
	bus_space_handle_t	rc_pci_cfg_memh;
	bus_space_handle_t	rc_pci_ecfg_eb_memh;
	bus_space_handle_t	rc_pci_ecfg_el_memh;
	bus_dma_tag_t		rc_dmat64;
	bus_dma_tag_t		rc_dmat32;
	bus_dma_tag_t		rc_dmat29;
	struct extent *		rc_phys_ex;		/* Note: MB units */
	struct extent *		rc_obio_eb_ex;
	struct extent *		rc_obio_el_ex;
	struct extent *		rc_iobus_ex;
	struct extent *		rc_pci_mem_ex;
	struct extent *		rc_pci_io_ex;
	struct extent *		rc_srio_mem_ex;
	uint64_t		rc_gpio_available;
	rmixlfw_info_t		rc_psb_info;
	rmixlfw_psb_type_t	rc_psb_type;
	volatile struct rmixlfw_cpu_wakeup_info *
				rc_cpu_wakeup_info;
	const void *		rc_cpu_wakeup_end;
	const char *		rc_cpuname;
	int			rc_mallocsafe;
	rmixlp_variant_t	rc_xlp_variant;
	uint8_t			rc_ncores;
};

extern struct rmixl_config rmixl_configuration;

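/* bus_space(9) tag initializers for the chip's address windows. */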
void	rmixl_flash_eb_bus_mem_init(bus_space_tag_t, void *);
void	rmixl_flash_el_bus_mem_init(bus_space_tag_t, void *);
void	rmixl_iobus_bus_mem_init(bus_space_tag_t, void *);
void	rmixl_obio_eb_bus_mem_init(bus_space_tag_t, void *);
void	rmixl_obio_el_bus_mem_init(bus_space_tag_t, void *);
void	rmixl_pci_cfg_el_bus_mem_init(bus_space_tag_t, void *);
void	rmixl_pci_cfg_eb_bus_mem_init(bus_space_tag_t, void *);
void	rmixl_pci_ecfg_el_bus_mem_init(bus_space_tag_t, void *);
void	rmixl_pci_ecfg_eb_bus_mem_init(bus_space_tag_t, void *);
void	rmixl_pci_eb_bus_mem_init(bus_space_tag_t, void *);
void	rmixl_pci_el_bus_mem_init(bus_space_tag_t, void *);
void	rmixl_pci_bus_io_init(bus_space_tag_t, void *);

void	rmixlp_pcie_pc_init(void);

void	rmixl_addr_error_init(void);
int	rmixl_addr_error_check(void);

uint64_t rmixl_mfcr(u_int);
void	rmixl_mtcr(uint64_t, u_int);

void	rmixl_eirr_ack(uint64_t, uint64_t, uint64_t);

void	rmixl_fmn_init(void);

void	rmixl_init_early_cons(struct rmixl_config *, bool);
void	rmixl_mach_xlp_init(struct rmixl_config *);
void	rmixl_mach_xlsxlr_init(struct rmixl_config *);
void	rmixl_mach_freq_init(struct rmixl_config *, bool, bool);
void	rmixl_mach_init_parse_args(int, char **);
void	rmixl_mach_init_common(struct rmixl_config *, vaddr_t, uint64_t,
	    bool, bool);
uint64_t rmixl_physaddr_init(void);
uint64_t rmixlfw_init(int64_t);

/*
 * rmixl_cache_err_dis:
 * - disable Cache, Data ECC, Snoop Tag Parity, Tag Parity errors
 * - clear the cache error log
 * - return previous value from RMIXL_PCR_L1D_CONFIG0
 */
static inline uint64_t
rmixl_cache_err_dis(void)
{
	uint64_t r;

	r = rmixl_mfcr(RMIXL_PCR_L1D_CONFIG0);
	rmixl_mtcr(RMIXL_PCR_L1D_CONFIG0, r & ~0x2e);
	rmixl_mtcr(RMIXL_PCR_L1D_CACHE_ERROR_LOG, 0);
	return r;
}

/*
 * rmixl_cache_err_restore:
 * - clear the cache error log, cache error overflow log,
 *   and cache interrupt registers
 * - restore previous value to RMIXL_PCR_L1D_CONFIG0
 */
static inline void
rmixl_cache_err_restore(uint64_t r)
{
	rmixl_mtcr(RMIXL_PCR_L1D_CACHE_ERROR_LOG, 0);
	rmixl_mtcr(RMIXL_PCR_L1D_CACHE_ERROR_OVF_LO, 0);
	rmixl_mtcr(RMIXL_PCR_L1D_CACHE_INTERRUPT, 0);
	rmixl_mtcr(RMIXL_PCR_L1D_CONFIG0, r);
}

static inline uint64_t
rmixl_cache_err_check(void)
{
	return rmixl_mfcr(RMIXL_PCR_L1D_CACHE_ERROR_LOG);
}

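/*
 * rmixl_probe_4:
 * - safely test whether a 32-bit device register exists: cache/parity
 *   error reporting is disabled around the read and the error log is
 *   checked afterwards
 * - returns true (non-zero) if the probe read did not log an error;
 *   illustrative use: if (rmixl_probe_4(regva)) ...device present...
 */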
static inline int
rmixl_probe_4(volatile uint32_t *va)
{
	uint32_t tmp;
	uint64_t r;		/* saved RMIXL_PCR_L1D_CONFIG0 (64-bit) */
	uint64_t err;
	int s;

	s = splhigh();
	r = rmixl_cache_err_dis();
	tmp = *va;	/* probe */
	err = rmixl_cache_err_check();
	rmixl_cache_err_restore(r);
	splx(s);

	return (err == 0);	/* true iff no cache error was logged */
}

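/*
 * Raw accessors for the XLP PCIe extended configuration (ECFG) space.
 * These bypass bus_space(9) and access the window directly through
 * KSEG1 from the physical base in rc_pci_ecfg (the bus_space variants
 * are compiled out below), presumably so they can be used before the
 * ECFG handles are mapped.  'tag' selects the device: it is simply
 * added to the window base along with 'offset'.  Values are converted
 * to/from big-endian byte order.
 */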
static inline uint32_t
rmixlp_read_4(uint32_t tag, bus_size_t offset)
{
#if 0
	const struct rmixl_config * const rcp = &rmixl_configuration;

	return bus_space_read_4(rcp->rc_pci_ecfg_memt, rcp->rc_pci_ecfg_memh,
	    offset);
#else
	const paddr_t ecfg_addr = rmixl_configuration.rc_pci_ecfg.r_pbase
	    + tag + offset;

	return be32toh(*(volatile uint32_t *)MIPS_PHYS_TO_KSEG1(ecfg_addr));
#endif
}

static inline uint64_t
rmixlp_read_8(uint32_t tag, bus_size_t offset)
{
#if 0
	const struct rmixl_config * const rcp = &rmixl_configuration;

	return bus_space_read_8(rcp->rc_pci_ecfg_memt, rcp->rc_pci_ecfg_memh,
	    offset);
#else
	const paddr_t ecfg_addr = rmixl_configuration.rc_pci_ecfg.r_pbase
	    + tag + offset;

	return be64toh(*(volatile uint64_t *)MIPS_PHYS_TO_KSEG1(ecfg_addr));
#endif
}

static inline void
rmixlp_write_4(uint32_t tag, bus_size_t offset, uint32_t v)
{
	const paddr_t ecfg_addr = rmixl_configuration.rc_pci_ecfg.r_pbase
	    + tag + offset;

	*(volatile uint32_t *)MIPS_PHYS_TO_KSEG1(ecfg_addr) = htobe32(v);
	__asm __volatile("sync");
}

static inline void
rmixlp_write_8(uint32_t tag, bus_size_t offset, uint64_t v)
{
	const paddr_t ecfg_addr = rmixl_configuration.rc_pci_ecfg.r_pbase
	    + tag + offset;

	*(volatile uint64_t *)MIPS_PHYS_TO_KSEG1(ecfg_addr) = htobe64(v);
	__asm __volatile("sync");
}

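/*
 * Record a physical region and reserve it in the given extent map.
 * The extent is maintained in MB units (cf. rc_phys_ex above), hence
 * the >> 20 scaling; overlaps cause a panic.
 */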
static inline void
rmixl_physaddr_add(struct extent *ext, const char *name,
	struct rmixl_region *rp, bus_addr_t xpbase, bus_size_t xsize)
{
	rp->r_pbase = xpbase;
	rp->r_size = xsize;
	u_long base = xpbase >> 20;
	u_long size = xsize >> 20;
	if (extent_alloc_region(ext, base, size, EX_NOWAIT) != 0) {
		panic("%s: %s: extent_alloc_region(%p, %#lx, %#lx, %#x) "
		    "failed", __func__, name, ext, base, size, EX_NOWAIT);
	}
}

#endif /* _MIPS_RMI_RMIXLVAR_H_ */