 1 1.9 para /* $NetBSD: rmixl_pcie.c,v 1.9 2012/01/27 18:52:58 para Exp $ */
2 1.2 matt
3 1.2 matt /*
4 1.2 matt * Copyright (c) 2001 Wasabi Systems, Inc.
5 1.2 matt * All rights reserved.
6 1.2 matt *
7 1.2 matt * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 1.2 matt *
9 1.2 matt * Redistribution and use in source and binary forms, with or without
10 1.2 matt * modification, are permitted provided that the following conditions
11 1.2 matt * are met:
12 1.2 matt * 1. Redistributions of source code must retain the above copyright
13 1.2 matt * notice, this list of conditions and the following disclaimer.
14 1.2 matt * 2. Redistributions in binary form must reproduce the above copyright
15 1.2 matt * notice, this list of conditions and the following disclaimer in the
16 1.2 matt * documentation and/or other materials provided with the distribution.
17 1.2 matt * 3. All advertising materials mentioning features or use of this software
18 1.2 matt * must display the following acknowledgement:
19 1.2 matt * This product includes software developed for the NetBSD Project by
20 1.2 matt * Wasabi Systems, Inc.
21 1.2 matt * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 1.2 matt * or promote products derived from this software without specific prior
23 1.2 matt * written permission.
24 1.2 matt *
25 1.2 matt * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 1.2 matt * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 1.2 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 1.2 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 1.2 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 1.2 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 1.2 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 1.2 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 1.2 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 1.2 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 1.2 matt * POSSIBILITY OF SUCH DAMAGE.
36 1.2 matt */
37 1.2 matt
38 1.2 matt /*
39 1.2 matt * PCI configuration support for RMI XLS SoC
40 1.2 matt */
41 1.2 matt
42 1.2 matt #include <sys/cdefs.h>
43 1.9 para __KERNEL_RCSID(0, "$NetBSD: rmixl_pcie.c,v 1.9 2012/01/27 18:52:58 para Exp $");
44 1.2 matt
45 1.2 matt #include "opt_pci.h"
46 1.2 matt #include "pci.h"
47 1.2 matt
48 1.2 matt #include <sys/cdefs.h>
49 1.2 matt
50 1.2 matt #include <sys/param.h>
51 1.8 matt #include <sys/bus.h>
52 1.8 matt #include <sys/cpu.h>
53 1.2 matt #include <sys/device.h>
54 1.2 matt #include <sys/extent.h>
55 1.8 matt #include <sys/intr.h>
56 1.8 matt #include <sys/kernel.h> /* for 'hz' */
57 1.2 matt #include <sys/malloc.h>
58 1.8 matt #include <sys/systm.h>
59 1.2 matt
60 1.2 matt #include <uvm/uvm_extern.h>
61 1.2 matt
62 1.2 matt #include <mips/rmi/rmixlreg.h>
63 1.2 matt #include <mips/rmi/rmixlvar.h>
64 1.3 matt #include <mips/rmi/rmixl_intr.h>
65 1.2 matt #include <mips/rmi/rmixl_pcievar.h>
66 1.2 matt
67 1.2 matt #include <mips/rmi/rmixl_obiovar.h>
68 1.2 matt
69 1.2 matt #include <dev/pci/pcivar.h>
70 1.2 matt #include <dev/pci/pcidevs.h>
71 1.2 matt #include <dev/pci/pciconf.h>
72 1.2 matt
73 1.2 matt #ifdef PCI_NETBSD_CONFIGURE
74 1.2 matt #include <mips/cache.h>
75 1.2 matt #endif
76 1.2 matt
77 1.2 matt #ifdef PCI_DEBUG
78 1.2 matt int rmixl_pcie_debug = PCI_DEBUG;
79 1.2 matt # define DPRINTF(x) do { if (rmixl_pcie_debug) printf x ; } while (0)
80 1.2 matt #else
81 1.2 matt # define DPRINTF(x)
82 1.2 matt #endif
83 1.2 matt
84 1.2 matt #ifndef DDB
85 1.2 matt # define STATIC static
86 1.2 matt #else
87 1.2 matt # define STATIC
88 1.2 matt #endif
89 1.2 matt
90 1.2 matt
91 1.2 matt /*
92 1.2 matt * XLS PCIe Extended Configuration Registers
93 1.2 matt */
94 1.2 matt #define RMIXL_PCIE_ECFG_UESR 0x104 /* Uncorrectable Error Status Reg */
95 1.2 matt #define RMIXL_PCIE_ECFG_UEMR 0x108 /* Uncorrectable Error Mask Reg */
96 1.2 matt #define RMIXL_PCIE_ECFG_UEVR 0x10c /* Uncorrectable Error seVerity Reg */
97 1.2 matt #define PCIE_ECFG_UEVR_DFLT \
98 1.2 matt (__BITS(18,17) | __BIT(31) | __BITS(5,4) | __BIT(0))
99 1.2 matt #define PCIE_ECFG_UExR_RESV (__BITS(31,21) | __BITS(11,6) | __BITS(3,1))
100 1.2 matt #define RMIXL_PCIE_ECFG_CESR 0x110 /* Correctable Error Status Reg */
101 1.2 matt #define RMIXL_PCIE_ECFG_CEMR 0x114 /* Correctable Error Mask Reg */
102 1.2 matt #define PCIE_ECFG_CExR_RESV (__BITS(31,14) | __BITS(11,9) | __BITS(5,1))
103 1.2 matt #define RMIXL_PCIE_ECFG_ACCR 0x118 /* Adv. Capabilities Control Reg */
104 1.2 matt #define RMIXL_PCIE_ECFG_HLRn(n) (0x11c + ((n) * 4)) /* Header Log Regs */
105 1.2 matt #define RMIXL_PCIE_ECFG_RECR 0x12c /* Root Error Command Reg */
106 1.2 matt #define PCIE_ECFG_RECR_RESV __BITS(31,3)
107 1.2 matt #define RMIXL_PCIE_ECFG_RESR 0x130 /* Root Error Status Reg */
108 1.2 matt #define PCIE_ECFG_RESR_RESV __BITS(26,7)
109 1.2 matt #define RMIXL_PCIE_ECFG_ESI 0x134 /* Error Source Identification Reg */
110 1.2 matt #define RMIXL_PCIE_ECFG_DSNCR 0x140 /* Dev Serial Number Capability Regs */
111 1.2 matt
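/*
 * Extended-config error registers polled by _rmixl_pcie_error_check();
 * the rw1c field holds the write-1-to-clear bits used to acknowledge
 * any latched errors found there.
 */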
112 1.2 matt static const struct {
113 1.2 matt u_int offset;
114 1.2 matt u_int32_t rw1c;
115 1.2 matt } pcie_ecfg_errs_tab[] = {
116 1.2 matt { RMIXL_PCIE_ECFG_UESR, (__BITS(20,12) | __BIT(4)) },
117 1.2 matt { RMIXL_PCIE_ECFG_CESR, (__BITS(20,12) | __BIT(4)) },
118 1.2 matt { RMIXL_PCIE_ECFG_HLRn(0), 0 },
119 1.2 matt { RMIXL_PCIE_ECFG_HLRn(1), 0 },
120 1.2 matt { RMIXL_PCIE_ECFG_HLRn(2), 0 },
121 1.2 matt { RMIXL_PCIE_ECFG_HLRn(3), 0 },
122 1.2 matt { RMIXL_PCIE_ECFG_RESR, __BITS(6,0) },
123 1.2 matt { RMIXL_PCIE_ECFG_ESI, 0 },
124 1.2 matt };
125 1.2 matt #define PCIE_ECFG_ERRS_OFFTAB_NENTRIES \
126 1.2 matt (sizeof(pcie_ecfg_errs_tab)/sizeof(pcie_ecfg_errs_tab[0]))
127 1.2 matt
128 1.3 matt typedef struct rmixl_pcie_int_csr {
129 1.3 matt uint r0;
130 1.3 matt uint r1;
131 1.3 matt } rmixl_pcie_int_csr_t;
132 1.3 matt
133 1.3 matt static const rmixl_pcie_int_csr_t int_enb_offset[4] = {
134 1.3 matt { RMIXL_PCIE_LINK0_INT_ENABLE0, RMIXL_PCIE_LINK0_INT_ENABLE1 },
135 1.3 matt { RMIXL_PCIE_LINK1_INT_ENABLE0, RMIXL_PCIE_LINK1_INT_ENABLE1 },
136 1.3 matt { RMIXL_PCIE_LINK2_INT_ENABLE0, RMIXL_PCIE_LINK2_INT_ENABLE1 },
137 1.3 matt { RMIXL_PCIE_LINK3_INT_ENABLE0, RMIXL_PCIE_LINK3_INT_ENABLE1 },
138 1.3 matt };
139 1.3 matt
140 1.3 matt static const rmixl_pcie_int_csr_t int_sts_offset[4] = {
141 1.3 matt { RMIXL_PCIE_LINK0_INT_STATUS0, RMIXL_PCIE_LINK0_INT_STATUS1 },
142 1.3 matt { RMIXL_PCIE_LINK1_INT_STATUS0, RMIXL_PCIE_LINK1_INT_STATUS1 },
143 1.3 matt { RMIXL_PCIE_LINK2_INT_STATUS0, RMIXL_PCIE_LINK2_INT_STATUS1 },
144 1.3 matt { RMIXL_PCIE_LINK3_INT_STATUS0, RMIXL_PCIE_LINK3_INT_STATUS1 },
145 1.3 matt };
146 1.3 matt
147 1.3 matt static const u_int msi_enb_offset[4] = {
148 1.3 matt RMIXL_PCIE_LINK0_MSI_ENABLE,
149 1.3 matt RMIXL_PCIE_LINK1_MSI_ENABLE,
150 1.3 matt RMIXL_PCIE_LINK2_MSI_ENABLE,
151 1.3 matt RMIXL_PCIE_LINK3_MSI_ENABLE
152 1.3 matt };
153 1.3 matt
154 1.3 matt #define RMIXL_PCIE_LINK_STATUS0_ERRORS __BITS(6,4)
155 1.3 matt #define RMIXL_PCIE_LINK_STATUS1_ERRORS __BITS(10,0)
156 1.3 matt #define RMIXL_PCIE_LINK_STATUS_ERRORS \
157 1.3 matt ((((uint64_t)RMIXL_PCIE_LINK_STATUS1_ERRORS) << 32) | \
158 1.3 matt (uint64_t)RMIXL_PCIE_LINK_STATUS0_ERRORS)
159 1.3 matt
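/*
 * per-link event counters live in a flat array indexed by
 * (bitno * ncpu) + cpu, where bitno is (INTx pin - 1);
 * see the allocation in rmixl_pcie_intcfg()
 */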
160 1.3 matt #define RMIXL_PCIE_EVCNT(sc, link, bitno, cpu) \
161 1.3 matt &(sc)->sc_evcnts[link][(bitno) * (ncpu) + (cpu)]
162 1.3 matt
163 1.2 matt static int rmixl_pcie_match(device_t, cfdata_t, void *);
164 1.2 matt static void rmixl_pcie_attach(device_t, device_t, void *);
165 1.2 matt static void rmixl_pcie_init(struct rmixl_pcie_softc *);
166 1.2 matt static void rmixl_pcie_init_ecfg(struct rmixl_pcie_softc *);
167 1.2 matt static void rmixl_pcie_attach_hook(struct device *, struct device *,
168 1.2 matt struct pcibus_attach_args *);
169 1.2 matt static void rmixl_pcie_lnkcfg_4xx(rmixl_pcie_lnktab_t *, uint32_t);
170 1.2 matt static void rmixl_pcie_lnkcfg_408Lite(rmixl_pcie_lnktab_t *, uint32_t);
171 1.2 matt static void rmixl_pcie_lnkcfg_2xx(rmixl_pcie_lnktab_t *, uint32_t);
172 1.2 matt static void rmixl_pcie_lnkcfg_1xx(rmixl_pcie_lnktab_t *, uint32_t);
173 1.2 matt static void rmixl_pcie_lnkcfg(struct rmixl_pcie_softc *);
174 1.3 matt static void rmixl_pcie_intcfg(struct rmixl_pcie_softc *);
175 1.2 matt static void rmixl_pcie_errata(struct rmixl_pcie_softc *);
176 1.2 matt static void rmixl_conf_interrupt(void *, int, int, int, int, int *);
177 1.2 matt static int rmixl_pcie_bus_maxdevs(void *, int);
178 1.2 matt static pcitag_t rmixl_tag_to_ecfg(pcitag_t);
179 1.2 matt static pcitag_t rmixl_pcie_make_tag(void *, int, int, int);
180 1.2 matt static void rmixl_pcie_decompose_tag(void *, pcitag_t, int *, int *, int *);
181 1.2 matt void rmixl_pcie_tag_print(const char *restrict, void *, pcitag_t, int, vaddr_t, u_long);
182 1.2 matt static int rmixl_pcie_conf_setup(struct rmixl_pcie_softc *,
183 1.2 matt pcitag_t, int *, bus_space_tag_t *,
184 1.2 matt bus_space_handle_t *);
185 1.2 matt static pcireg_t rmixl_pcie_conf_read(void *, pcitag_t, int);
186 1.2 matt static void rmixl_pcie_conf_write(void *, pcitag_t, int, pcireg_t);
187 1.2 matt
188 1.4 dyoung static int rmixl_pcie_intr_map(const struct pci_attach_args *,
189 1.2 matt pci_intr_handle_t *);
190 1.2 matt static const char *
191 1.2 matt rmixl_pcie_intr_string(void *, pci_intr_handle_t);
192 1.2 matt static const struct evcnt *
193 1.2 matt rmixl_pcie_intr_evcnt(void *, pci_intr_handle_t);
194 1.3 matt static pci_intr_handle_t
195 1.3 matt rmixl_pcie_make_pih(u_int, u_int, u_int);
196 1.3 matt static void rmixl_pcie_decompose_pih(pci_intr_handle_t, u_int *, u_int *, u_int *);
197 1.3 matt static void rmixl_pcie_intr_disestablish(void *, void *);
198 1.2 matt static void *rmixl_pcie_intr_establish(void *, pci_intr_handle_t,
199 1.2 matt int, int (*)(void *), void *);
200 1.3 matt static rmixl_pcie_link_intr_t *
201 1.3 matt rmixl_pcie_lip_add_1(rmixl_pcie_softc_t *, u_int, int, int);
202 1.3 matt static void rmixl_pcie_lip_free_callout(rmixl_pcie_link_intr_t *);
203 1.3 matt static void rmixl_pcie_lip_free(void *);
204 1.3 matt static int rmixl_pcie_intr(void *);
205 1.3 matt static void rmixl_pcie_link_error_intr(u_int, uint32_t, uint32_t);
206 1.2 matt #if defined(DEBUG) || defined(DDB)
207 1.2 matt int rmixl_pcie_error_check(void);
208 1.2 matt #endif
209 1.2 matt static int _rmixl_pcie_error_check(void *);
210 1.2 matt static int rmixl_pcie_error_intr(void *);
211 1.2 matt
212 1.2 matt
213 1.2 matt #define RMIXL_PCIE_CONCAT3(a,b,c) a ## b ## c
214 1.2 matt #define RMIXL_PCIE_BAR_INIT(reg, bar, size, align) { \
215 1.2 matt struct extent *ext = rmixl_configuration.rc_phys_ex; \
216 1.2 matt u_long region_start; \
217 1.2 matt uint64_t ba; \
218 1.2 matt int err; \
219 1.2 matt \
220 1.2 matt err = extent_alloc(ext, (size), (align), 0UL, EX_NOWAIT, \
 221 1.2 matt 			&region_start); \
222 1.2 matt if (err != 0) \
223 1.2 matt panic("%s: extent_alloc(%p, %#lx, %#lx, %#lx, %#x, %p)",\
224 1.2 matt __func__, ext, size, align, 0UL, EX_NOWAIT, \
 225 1.2 matt 			&region_start); \
226 1.2 matt ba = (uint64_t)region_start; \
227 1.2 matt ba *= (1024 * 1024); \
228 1.2 matt bar = RMIXL_PCIE_CONCAT3(RMIXL_PCIE_,reg,_BAR)(ba, 1); \
229 1.2 matt DPRINTF(("PCIE %s BAR was not enabled by firmware\n" \
230 1.2 matt "enabling %s at phys %#" PRIxBUSADDR ", size %lu MB\n", \
231 1.2 matt __STRING(reg), __STRING(reg), ba, size)); \
232 1.2 matt RMIXL_IOREG_WRITE(RMIXL_IO_DEV_BRIDGE + \
233 1.3 matt RMIXL_PCIE_CONCAT3(RMIXLS_SBC_PCIE_,reg,_BAR), bar); \
234 1.2 matt bar = RMIXL_IOREG_READ(RMIXL_IO_DEV_BRIDGE + \
235 1.3 matt RMIXL_PCIE_CONCAT3(RMIXLS_SBC_PCIE_,reg,_BAR)); \
236 1.2 matt DPRINTF(("%s: %s BAR %#x\n", __func__, __STRING(reg), bar)); \
237 1.2 matt }
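/*
 * RMIXL_PCIE_BAR_INIT (above) carves a region (in MB units) out of the
 * physical address extent, programs the named SBC PCIe BAR with it, and
 * reads the BAR back so the caller sees the value that actually stuck.
 */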
238 1.2 matt
239 1.2 matt
240 1.2 matt #if defined(DEBUG) || defined(DDB)
241 1.2 matt static void *rmixl_pcie_v;
242 1.2 matt #endif
243 1.2 matt
244 1.2 matt CFATTACH_DECL_NEW(rmixl_pcie, sizeof(struct rmixl_pcie_softc),
245 1.2 matt rmixl_pcie_match, rmixl_pcie_attach, NULL, NULL);
246 1.2 matt
247 1.2 matt static int rmixl_pcie_found;
248 1.2 matt
249 1.2 matt static int
250 1.2 matt rmixl_pcie_match(device_t parent, cfdata_t cf, void *aux)
251 1.2 matt {
252 1.2 matt uint32_t r;
253 1.2 matt
254 1.3 matt /*
255 1.3 matt * PCIe interface exists on XLS chips only
256 1.3 matt */
257 1.3 matt if (! cpu_rmixls(mips_options.mips_cpu))
258 1.3 matt return 0;
259 1.3 matt
260 1.2 matt /* XXX
261 1.2 matt * for now there is only one PCIe Interface on chip
 262 1.2 matt * this could change with future RMI XL family designs
263 1.2 matt */
264 1.2 matt if (rmixl_pcie_found)
265 1.2 matt return 0;
266 1.2 matt
267 1.2 matt /* read GPIO Reset Configuration register */
268 1.2 matt r = RMIXL_IOREG_READ(RMIXL_IO_DEV_GPIO + RMIXL_GPIO_RESET_CFG);
269 1.2 matt r >>= 26;
270 1.2 matt r &= 3;
271 1.2 matt if (r != 0)
272 1.2 matt return 0; /* strapped for SRIO */
273 1.2 matt
274 1.2 matt return 1;
275 1.2 matt }
276 1.2 matt
277 1.2 matt static void
278 1.2 matt rmixl_pcie_attach(device_t parent, device_t self, void *aux)
279 1.2 matt {
280 1.2 matt struct rmixl_pcie_softc *sc = device_private(self);
281 1.2 matt struct obio_attach_args *obio = aux;
282 1.2 matt struct rmixl_config *rcp = &rmixl_configuration;
283 1.2 matt struct pcibus_attach_args pba;
284 1.2 matt uint32_t bar;
285 1.2 matt
286 1.2 matt rmixl_pcie_found = 1;
287 1.2 matt sc->sc_dev = self;
288 1.2 matt
289 1.2 matt aprint_normal(" RMI XLS PCIe Interface\n");
290 1.2 matt
291 1.3 matt mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_HIGH);
292 1.3 matt
293 1.2 matt rmixl_pcie_lnkcfg(sc);
294 1.2 matt
295 1.3 matt rmixl_pcie_intcfg(sc);
296 1.3 matt
297 1.2 matt rmixl_pcie_errata(sc);
298 1.2 matt
299 1.2 matt sc->sc_29bit_dmat = obio->obio_29bit_dmat;
300 1.2 matt sc->sc_32bit_dmat = obio->obio_32bit_dmat;
301 1.2 matt sc->sc_64bit_dmat = obio->obio_64bit_dmat;
302 1.2 matt
303 1.3 matt sc->sc_tmsk = obio->obio_tmsk;
304 1.3 matt
305 1.2 matt /*
306 1.2 matt * get PCI config space base addr from SBC PCIe CFG BAR
307 1.2 matt * initialize it if necessary
308 1.2 matt */
309 1.3 matt bar = RMIXL_IOREG_READ(RMIXL_IO_DEV_BRIDGE + RMIXLS_SBC_PCIE_CFG_BAR);
310 1.2 matt DPRINTF(("%s: PCIE_CFG_BAR %#x\n", __func__, bar));
311 1.2 matt if ((bar & RMIXL_PCIE_CFG_BAR_ENB) == 0) {
312 1.2 matt u_long n = RMIXL_PCIE_CFG_SIZE / (1024 * 1024);
313 1.2 matt RMIXL_PCIE_BAR_INIT(CFG, bar, n, n);
314 1.2 matt }
315 1.3 matt rcp->rc_pci_cfg_pbase = (bus_addr_t)RMIXL_PCIE_CFG_BAR_TO_BA(bar);
316 1.3 matt rcp->rc_pci_cfg_size = (bus_size_t)RMIXL_PCIE_CFG_SIZE;
317 1.2 matt
318 1.2 matt /*
319 1.2 matt * get PCIE Extended config space base addr from SBC PCIe ECFG BAR
320 1.2 matt * initialize it if necessary
321 1.2 matt */
322 1.3 matt bar = RMIXL_IOREG_READ(RMIXL_IO_DEV_BRIDGE + RMIXLS_SBC_PCIE_ECFG_BAR);
323 1.2 matt DPRINTF(("%s: PCIE_ECFG_BAR %#x\n", __func__, bar));
324 1.2 matt if ((bar & RMIXL_PCIE_ECFG_BAR_ENB) == 0) {
325 1.2 matt u_long n = RMIXL_PCIE_ECFG_SIZE / (1024 * 1024);
326 1.2 matt RMIXL_PCIE_BAR_INIT(ECFG, bar, n, n);
327 1.2 matt }
328 1.3 matt rcp->rc_pci_ecfg_pbase = (bus_addr_t)RMIXL_PCIE_ECFG_BAR_TO_BA(bar);
329 1.3 matt rcp->rc_pci_ecfg_size = (bus_size_t)RMIXL_PCIE_ECFG_SIZE;
330 1.2 matt
331 1.2 matt /*
332 1.2 matt * get PCI MEM space base [addr, size] from SBC PCIe MEM BAR
333 1.2 matt * initialize it if necessary
334 1.2 matt */
335 1.3 matt bar = RMIXL_IOREG_READ(RMIXL_IO_DEV_BRIDGE + RMIXLS_SBC_PCIE_MEM_BAR);
336 1.2 matt DPRINTF(("%s: PCIE_MEM_BAR %#x\n", __func__, bar));
337 1.2 matt if ((bar & RMIXL_PCIE_MEM_BAR_ENB) == 0) {
338 1.2 matt u_long n = 256; /* 256 MB */
339 1.2 matt RMIXL_PCIE_BAR_INIT(MEM, bar, n, n);
340 1.2 matt }
341 1.2 matt rcp->rc_pci_mem_pbase = (bus_addr_t)RMIXL_PCIE_MEM_BAR_TO_BA(bar);
342 1.2 matt rcp->rc_pci_mem_size = (bus_size_t)RMIXL_PCIE_MEM_BAR_TO_SIZE(bar);
343 1.2 matt
344 1.2 matt /*
345 1.2 matt * get PCI IO space base [addr, size] from SBC PCIe IO BAR
346 1.2 matt * initialize it if necessary
347 1.2 matt */
348 1.3 matt bar = RMIXL_IOREG_READ(RMIXL_IO_DEV_BRIDGE + RMIXLS_SBC_PCIE_IO_BAR);
349 1.2 matt DPRINTF(("%s: PCIE_IO_BAR %#x\n", __func__, bar));
350 1.2 matt if ((bar & RMIXL_PCIE_IO_BAR_ENB) == 0) {
351 1.2 matt u_long n = 32; /* 32 MB */
352 1.2 matt RMIXL_PCIE_BAR_INIT(IO, bar, n, n);
353 1.2 matt }
354 1.2 matt rcp->rc_pci_io_pbase = (bus_addr_t)RMIXL_PCIE_IO_BAR_TO_BA(bar);
355 1.2 matt rcp->rc_pci_io_size = (bus_size_t)RMIXL_PCIE_IO_BAR_TO_SIZE(bar);
356 1.2 matt
357 1.2 matt /*
358 1.2 matt * initialize the PCI CFG, ECFG bus space tags
359 1.2 matt */
360 1.3 matt rmixl_pci_cfg_bus_mem_init(&rcp->rc_pci_cfg_memt, rcp);
361 1.3 matt sc->sc_pci_cfg_memt = &rcp->rc_pci_cfg_memt;
362 1.2 matt
363 1.3 matt rmixl_pci_ecfg_bus_mem_init(&rcp->rc_pci_ecfg_memt, rcp);
364 1.3 matt sc->sc_pci_ecfg_memt = &rcp->rc_pci_ecfg_memt;
365 1.2 matt
366 1.2 matt /*
367 1.2 matt * initialize the PCI MEM and IO bus space tags
368 1.2 matt */
369 1.3 matt rmixl_pci_bus_mem_init(&rcp->rc_pci_memt, rcp);
370 1.3 matt rmixl_pci_bus_io_init(&rcp->rc_pci_iot, rcp);
371 1.2 matt
372 1.2 matt /*
373 1.2 matt * initialize the extended configuration regs
374 1.2 matt */
375 1.2 matt rmixl_pcie_init_ecfg(sc);
376 1.2 matt
377 1.2 matt /*
378 1.2 matt * initialize the PCI chipset tag
379 1.2 matt */
380 1.2 matt rmixl_pcie_init(sc);
381 1.2 matt
382 1.2 matt /*
383 1.2 matt * attach the PCI bus
384 1.2 matt */
385 1.2 matt memset(&pba, 0, sizeof(pba));
386 1.2 matt pba.pba_memt = &rcp->rc_pci_memt;
387 1.2 matt pba.pba_iot = &rcp->rc_pci_iot;
388 1.3 matt pba.pba_dmat = sc->sc_32bit_dmat;
389 1.3 matt pba.pba_dmat64 = sc->sc_64bit_dmat;
390 1.2 matt pba.pba_pc = &sc->sc_pci_chipset;
391 1.2 matt pba.pba_bus = 0;
392 1.2 matt pba.pba_bridgetag = NULL;
393 1.2 matt pba.pba_intrswiz = 0;
394 1.2 matt pba.pba_intrtag = 0;
395 1.6 dyoung pba.pba_flags = PCI_FLAGS_IO_OKAY | PCI_FLAGS_MEM_OKAY |
396 1.2 matt PCI_FLAGS_MRL_OKAY | PCI_FLAGS_MRM_OKAY | PCI_FLAGS_MWI_OKAY;
397 1.2 matt
398 1.2 matt (void) config_found_ia(self, "pcibus", &pba, pcibusprint);
399 1.2 matt }
400 1.2 matt
401 1.2 matt /*
402 1.2 matt * rmixl_pcie_lnkcfg_4xx - link configs for XLS4xx and XLS6xx
403 1.2 matt * use IO_AD[11] and IO_AD[10], observable in
404 1.2 matt * Bits[21:20] of the GPIO Reset Configuration register
405 1.2 matt */
406 1.2 matt static void
407 1.2 matt rmixl_pcie_lnkcfg_4xx(rmixl_pcie_lnktab_t *ltp, uint32_t grcr)
408 1.2 matt {
409 1.2 matt u_int index;
410 1.2 matt static const rmixl_pcie_lnkcfg_t lnktab_4xx[4][4] = {
411 1.2 matt {{ LCFG_EP, 4}, {LCFG_NO, 0}, {LCFG_NO, 0}, {LCFG_NO, 0}},
412 1.2 matt {{ LCFG_RC, 4}, {LCFG_NO, 0}, {LCFG_NO, 0}, {LCFG_NO, 0}},
413 1.2 matt {{ LCFG_EP, 1}, {LCFG_RC, 1}, {LCFG_RC, 1}, {LCFG_RC, 1}},
414 1.2 matt {{ LCFG_RC, 1}, {LCFG_RC, 1}, {LCFG_RC, 1}, {LCFG_RC, 1}},
415 1.2 matt };
416 1.2 matt static const char *lnkstr_4xx[4] = {
417 1.3 matt "1EPx4",
418 1.3 matt "1RCx4",
419 1.3 matt "1EPx1, 3RCx1",
420 1.3 matt "4RCx1"
421 1.2 matt };
422 1.2 matt index = (grcr >> 20) & 3;
423 1.2 matt ltp->ncfgs = 4;
424 1.2 matt ltp->cfg = lnktab_4xx[index];
425 1.2 matt ltp->str = lnkstr_4xx[index];
426 1.2 matt }
427 1.2 matt
428 1.2 matt /*
429 1.2 matt * rmixl_pcie_lnkcfg_408Lite - link configs for XLS408Lite and XLS04A
430 1.2 matt * use IO_AD[11] and IO_AD[10], observable in
431 1.2 matt * Bits[21:20] of the GPIO Reset Configuration register
432 1.2 matt */
433 1.2 matt static void
434 1.2 matt rmixl_pcie_lnkcfg_408Lite(rmixl_pcie_lnktab_t *ltp, uint32_t grcr)
435 1.2 matt {
436 1.2 matt u_int index;
437 1.2 matt static const rmixl_pcie_lnkcfg_t lnktab_408Lite[4][2] = {
438 1.2 matt {{ LCFG_EP, 4}, {LCFG_NO, 0}},
439 1.2 matt {{ LCFG_RC, 4}, {LCFG_NO, 0}},
440 1.2 matt {{ LCFG_EP, 1}, {LCFG_RC, 1}},
441 1.2 matt {{ LCFG_RC, 1}, {LCFG_RC, 1}},
442 1.2 matt };
443 1.2 matt static const char *lnkstr_408Lite[4] = {
 444 1.3 matt "1EPx4",
445 1.3 matt "1RCx4",
446 1.3 matt "1EPx1, 1RCx1",
447 1.3 matt "2RCx1"
448 1.2 matt };
449 1.2 matt
450 1.2 matt index = (grcr >> 20) & 3;
451 1.2 matt ltp->ncfgs = 2;
452 1.2 matt ltp->cfg = lnktab_408Lite[index];
453 1.2 matt ltp->str = lnkstr_408Lite[index];
454 1.2 matt }
455 1.2 matt
456 1.2 matt /*
457 1.2 matt * rmixl_pcie_lnkcfg_2xx - link configs for XLS2xx
458 1.2 matt * use IO_AD[10], observable in Bit[20] of the
459 1.2 matt * GPIO Reset Configuration register
460 1.2 matt */
461 1.2 matt static void
462 1.2 matt rmixl_pcie_lnkcfg_2xx(rmixl_pcie_lnktab_t *ltp, uint32_t grcr)
463 1.2 matt {
464 1.2 matt u_int index;
465 1.2 matt static const rmixl_pcie_lnkcfg_t lnktab_2xx[2][4] = {
466 1.2 matt {{ LCFG_EP, 1}, {LCFG_RC, 1}, {LCFG_RC, 1}, {LCFG_RC, 1}},
467 1.2 matt {{ LCFG_RC, 1}, {LCFG_RC, 1}, {LCFG_RC, 1}, {LCFG_RC, 1}}
468 1.2 matt };
469 1.2 matt static const char *lnkstr_2xx[2] = {
470 1.3 matt "1EPx1, 3RCx1",
471 1.3 matt "4RCx1",
472 1.2 matt };
473 1.2 matt
474 1.2 matt index = (grcr >> 20) & 1;
475 1.2 matt ltp->ncfgs = 4;
476 1.2 matt ltp->cfg = lnktab_2xx[index];
477 1.2 matt ltp->str = lnkstr_2xx[index];
478 1.2 matt }
479 1.2 matt
480 1.2 matt /*
481 1.2 matt * rmixl_pcie_lnkcfg_1xx - link configs for XLS1xx
482 1.2 matt * use IO_AD[10], observable in Bit[20] of the
483 1.2 matt * GPIO Reset Configuration register
484 1.2 matt */
485 1.2 matt static void
486 1.2 matt rmixl_pcie_lnkcfg_1xx(rmixl_pcie_lnktab_t *ltp, uint32_t grcr)
487 1.2 matt {
488 1.2 matt u_int index;
489 1.2 matt static const rmixl_pcie_lnkcfg_t lnktab_1xx[2][2] = {
490 1.2 matt {{ LCFG_EP, 1}, {LCFG_RC, 1}},
491 1.2 matt {{ LCFG_RC, 1}, {LCFG_RC, 1}}
492 1.2 matt };
493 1.2 matt static const char *lnkstr_1xx[2] = {
494 1.3 matt "1EPx1, 1RCx1",
495 1.3 matt "2RCx1",
496 1.2 matt };
497 1.2 matt
498 1.2 matt index = (grcr >> 20) & 1;
499 1.2 matt ltp->ncfgs = 2;
500 1.2 matt ltp->cfg = lnktab_1xx[index];
501 1.2 matt ltp->str = lnkstr_1xx[index];
502 1.2 matt }
503 1.2 matt
504 1.2 matt /*
505 1.2 matt * rmixl_pcie_lnkcfg - determine PCI Express Link Configuration
506 1.2 matt */
507 1.2 matt static void
508 1.2 matt rmixl_pcie_lnkcfg(struct rmixl_pcie_softc *sc)
509 1.2 matt {
510 1.2 matt uint32_t r;
511 1.2 matt
512 1.2 matt /* read GPIO Reset Configuration register */
513 1.2 matt r = RMIXL_IOREG_READ(RMIXL_IO_DEV_GPIO + RMIXL_GPIO_RESET_CFG);
514 1.2 matt DPRINTF(("%s: GPIO RCR %#x\n", __func__, r));
515 1.2 matt
516 1.3 matt switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
517 1.2 matt case MIPS_XLS104:
518 1.2 matt case MIPS_XLS108:
519 1.2 matt rmixl_pcie_lnkcfg_1xx(&sc->sc_pcie_lnktab, r);
520 1.2 matt break;
521 1.2 matt case MIPS_XLS204:
522 1.2 matt case MIPS_XLS208:
523 1.2 matt rmixl_pcie_lnkcfg_2xx(&sc->sc_pcie_lnktab, r);
524 1.2 matt break;
525 1.2 matt case MIPS_XLS404LITE:
526 1.2 matt case MIPS_XLS408LITE:
527 1.2 matt rmixl_pcie_lnkcfg_408Lite(&sc->sc_pcie_lnktab, r);
528 1.2 matt break;
529 1.2 matt case MIPS_XLS404:
530 1.2 matt case MIPS_XLS408:
531 1.2 matt case MIPS_XLS416:
532 1.2 matt case MIPS_XLS608:
533 1.2 matt case MIPS_XLS616:
534 1.2 matt /* 6xx uses same table as 4xx */
535 1.2 matt rmixl_pcie_lnkcfg_4xx(&sc->sc_pcie_lnktab, r);
536 1.2 matt break;
537 1.2 matt default:
538 1.2 matt panic("%s: unknown RMI PRID IMPL", __func__);
539 1.2 matt }
540 1.2 matt
541 1.2 matt aprint_normal("%s: link config %s\n",
542 1.2 matt device_xname(sc->sc_dev), sc->sc_pcie_lnktab.str);
543 1.2 matt }
544 1.2 matt
545 1.3 matt /*
546 1.3 matt * rmixl_pcie_intcfg - init PCIe Link interrupt enables
547 1.3 matt */
548 1.3 matt static void
549 1.3 matt rmixl_pcie_intcfg(struct rmixl_pcie_softc *sc)
550 1.3 matt {
551 1.3 matt int link;
552 1.3 matt size_t size;
553 1.3 matt rmixl_pcie_evcnt_t *ev;
554 1.3 matt
555 1.3 matt DPRINTF(("%s: disable all link interrupts\n", __func__));
556 1.3 matt for (link=0; link < sc->sc_pcie_lnktab.ncfgs; link++) {
557 1.3 matt RMIXL_IOREG_WRITE(RMIXL_IO_DEV_PCIE_LE + int_enb_offset[link].r0,
558 1.3 matt RMIXL_PCIE_LINK_STATUS0_ERRORS);
559 1.3 matt RMIXL_IOREG_WRITE(RMIXL_IO_DEV_PCIE_LE + int_enb_offset[link].r1,
560 1.3 matt RMIXL_PCIE_LINK_STATUS1_ERRORS);
561 1.3 matt RMIXL_IOREG_WRITE(RMIXL_IO_DEV_PCIE_LE + msi_enb_offset[link], 0);
562 1.3 matt sc->sc_link_intr[link] = NULL;
563 1.3 matt
564 1.3 matt /*
565 1.3 matt * allocate per-cpu, per-pin interrupt event counters
566 1.3 matt */
567 1.3 matt size = ncpu * PCI_INTERRUPT_PIN_MAX * sizeof(rmixl_pcie_evcnt_t);
568 1.3 matt ev = malloc(size, M_DEVBUF, M_NOWAIT);
569 1.3 matt if (ev == NULL)
570 1.3 matt panic("%s: cannot malloc evcnts\n", __func__);
571 1.3 matt sc->sc_evcnts[link] = ev;
572 1.3 matt for (int pin=PCI_INTERRUPT_PIN_A; pin <= PCI_INTERRUPT_PIN_MAX; pin++) {
573 1.3 matt for (int cpu=0; cpu < ncpu; cpu++) {
574 1.3 matt ev = RMIXL_PCIE_EVCNT(sc, link, pin - 1, cpu);
575 1.3 matt snprintf(ev->name, sizeof(ev->name),
576 1.3 matt "cpu%d, link %d, pin %d", cpu, link, pin);
577 1.3 matt evcnt_attach_dynamic(&ev->evcnt, EVCNT_TYPE_INTR,
578 1.3 matt NULL, "rmixl_pcie", ev->name);
579 1.3 matt }
580 1.3 matt }
581 1.3 matt }
582 1.3 matt }
583 1.3 matt
584 1.2 matt static void
585 1.2 matt rmixl_pcie_errata(struct rmixl_pcie_softc *sc)
586 1.2 matt {
587 1.3 matt const mips_prid_t cpu_id = mips_options.mips_cpu_id;
588 1.2 matt u_int rev;
589 1.2 matt u_int lanes;
590 1.2 matt bool e391 = false;
591 1.2 matt
592 1.2 matt /*
593 1.2 matt * 3.9.1 PCIe Link-0 Registers Reset to Incorrect Values
 594 1.2 matt * check if it applies to this CPU implementation and revision
595 1.2 matt */
596 1.2 matt rev = MIPS_PRID_REV(cpu_id);
597 1.2 matt switch (MIPS_PRID_IMPL(cpu_id)) {
598 1.2 matt case MIPS_XLS104:
599 1.2 matt case MIPS_XLS108:
600 1.2 matt break;
601 1.2 matt case MIPS_XLS204:
602 1.2 matt case MIPS_XLS208:
603 1.2 matt /* stepping A0 is affected */
604 1.2 matt if (rev == 0)
605 1.2 matt e391 = true;
606 1.2 matt break;
607 1.2 matt case MIPS_XLS404LITE:
608 1.2 matt case MIPS_XLS408LITE:
609 1.2 matt break;
610 1.2 matt case MIPS_XLS404:
611 1.2 matt case MIPS_XLS408:
612 1.2 matt case MIPS_XLS416:
613 1.2 matt /* steppings A0 and A1 are affected */
614 1.2 matt if ((rev == 0) || (rev == 1))
615 1.2 matt e391 = true;
616 1.2 matt break;
617 1.2 matt case MIPS_XLS608:
618 1.2 matt case MIPS_XLS616:
619 1.2 matt break;
620 1.2 matt default:
621 1.2 matt panic("unknown RMI PRID IMPL");
622 1.2 matt }
623 1.2 matt
624 1.2 matt /*
625 1.2 matt * for XLS we only need to check entry #0
626 1.2 matt * this may need to change for later XL family chips
627 1.2 matt */
628 1.2 matt lanes = sc->sc_pcie_lnktab.cfg[0].lanes;
629 1.2 matt
630 1.2 matt if ((e391 != false) && ((lanes == 2) || (lanes == 4))) {
631 1.2 matt /*
632 1.2 matt * attempt work around for errata 3.9.1
633 1.2 matt * "PCIe Link-0 Registers Reset to Incorrect Values"
634 1.2 matt * the registers are write-once: if the firmware already wrote,
635 1.2 matt * then our writes are ignored; hope they did it right.
636 1.2 matt */
637 1.2 matt uint32_t queuectrl;
638 1.2 matt uint32_t bufdepth;
639 1.2 matt #ifdef DIAGNOSTIC
640 1.2 matt uint32_t r;
641 1.2 matt #endif
642 1.2 matt
643 1.2 matt aprint_normal("%s: attempt work around for errata 3.9.1",
644 1.2 matt device_xname(sc->sc_dev));
645 1.2 matt if (lanes == 4) {
646 1.2 matt queuectrl = 0x00018074;
647 1.2 matt bufdepth = 0x001901D1;
648 1.2 matt } else {
649 1.2 matt queuectrl = 0x00018036;
650 1.2 matt bufdepth = 0x001900D9;
651 1.2 matt }
652 1.2 matt
653 1.2 matt RMIXL_IOREG_WRITE(RMIXL_IO_DEV_PCIE_BE +
654 1.2 matt RMIXL_VC0_POSTED_RX_QUEUE_CTRL, queuectrl);
655 1.2 matt RMIXL_IOREG_WRITE(RMIXL_IO_DEV_PCIE_BE +
656 1.2 matt RMIXL_VC0_POSTED_BUFFER_DEPTH, bufdepth);
657 1.2 matt
658 1.2 matt #ifdef DIAGNOSTIC
659 1.2 matt r = RMIXL_IOREG_READ(RMIXL_IO_DEV_PCIE_BE +
660 1.2 matt RMIXL_VC0_POSTED_RX_QUEUE_CTRL);
661 1.2 matt printf("\nVC0_POSTED_RX_QUEUE_CTRL %#x\n", r);
662 1.2 matt
663 1.2 matt r = RMIXL_IOREG_READ(RMIXL_IO_DEV_PCIE_BE +
664 1.2 matt RMIXL_VC0_POSTED_BUFFER_DEPTH);
665 1.2 matt printf("VC0_POSTED_BUFFER_DEPTH %#x\n", r);
666 1.2 matt #endif
667 1.2 matt }
668 1.2 matt }
669 1.2 matt
670 1.2 matt static void
671 1.2 matt rmixl_pcie_init(struct rmixl_pcie_softc *sc)
672 1.2 matt {
673 1.2 matt pci_chipset_tag_t pc = &sc->sc_pci_chipset;
674 1.2 matt #if NPCI > 0 && defined(PCI_NETBSD_CONFIGURE)
675 1.2 matt struct extent *ioext, *memext;
676 1.2 matt #endif
677 1.2 matt
678 1.2 matt pc->pc_conf_v = (void *)sc;
679 1.2 matt pc->pc_attach_hook = rmixl_pcie_attach_hook;
680 1.2 matt pc->pc_bus_maxdevs = rmixl_pcie_bus_maxdevs;
681 1.2 matt pc->pc_make_tag = rmixl_pcie_make_tag;
682 1.2 matt pc->pc_decompose_tag = rmixl_pcie_decompose_tag;
683 1.2 matt pc->pc_conf_read = rmixl_pcie_conf_read;
684 1.2 matt pc->pc_conf_write = rmixl_pcie_conf_write;
685 1.2 matt
686 1.2 matt pc->pc_intr_v = (void *)sc;
687 1.2 matt pc->pc_intr_map = rmixl_pcie_intr_map;
688 1.2 matt pc->pc_intr_string = rmixl_pcie_intr_string;
689 1.2 matt pc->pc_intr_evcnt = rmixl_pcie_intr_evcnt;
690 1.2 matt pc->pc_intr_establish = rmixl_pcie_intr_establish;
691 1.2 matt pc->pc_intr_disestablish = rmixl_pcie_intr_disestablish;
692 1.2 matt pc->pc_conf_interrupt = rmixl_conf_interrupt;
693 1.2 matt
694 1.2 matt #if NPCI > 0 && defined(PCI_NETBSD_CONFIGURE)
695 1.2 matt /*
696 1.2 matt * Configure the PCI bus.
697 1.2 matt */
698 1.2 matt struct rmixl_config *rcp = &rmixl_configuration;
699 1.2 matt
700 1.2 matt aprint_normal("%s: configuring PCI bus\n",
701 1.2 matt device_xname(sc->sc_dev));
702 1.2 matt
703 1.2 matt ioext = extent_create("pciio",
704 1.2 matt rcp->rc_pci_io_pbase,
705 1.2 matt rcp->rc_pci_io_pbase + rcp->rc_pci_io_size - 1,
706 1.9 para NULL, 0, EX_NOWAIT);
707 1.2 matt
708 1.2 matt memext = extent_create("pcimem",
709 1.2 matt rcp->rc_pci_mem_pbase,
710 1.2 matt rcp->rc_pci_mem_pbase + rcp->rc_pci_mem_size - 1,
711 1.9 para NULL, 0, EX_NOWAIT);
712 1.2 matt
713 1.3 matt pci_configure_bus(pc, ioext, memext, NULL, 0,
714 1.3 matt mips_cache_info.mci_dcache_align);
715 1.2 matt
716 1.2 matt extent_destroy(ioext);
717 1.2 matt extent_destroy(memext);
718 1.2 matt #endif
719 1.2 matt }
720 1.2 matt
721 1.2 matt static void
722 1.2 matt rmixl_pcie_init_ecfg(struct rmixl_pcie_softc *sc)
723 1.2 matt {
724 1.2 matt void *v;
725 1.2 matt pcitag_t tag;
726 1.2 matt pcireg_t r;
727 1.2 matt
728 1.2 matt v = sc;
729 1.2 matt tag = rmixl_pcie_make_tag(v, 0, 0, 0);
730 1.2 matt
731 1.2 matt #ifdef PCI_DEBUG
732 1.2 matt int i, offset;
733 1.2 matt static const int offtab[] =
734 1.2 matt { 0, 4, 8, 0xc, 0x10, 0x14, 0x18, 0x1c,
735 1.2 matt 0x2c, 0x30, 0x34 };
736 1.2 matt for (i=0; i < sizeof(offtab)/sizeof(offtab[0]); i++) {
737 1.2 matt offset = 0x100 + offtab[i];
738 1.2 matt r = rmixl_pcie_conf_read(v, tag, offset);
739 1.2 matt printf("%s: %#x: %#x\n", __func__, offset, r);
740 1.2 matt }
741 1.2 matt #endif
742 1.2 matt r = rmixl_pcie_conf_read(v, tag, 0x100);
743 1.2 matt if (r == -1)
744 1.2 matt return; /* cannot access */
745 1.2 matt
746 1.2 matt /* check pre-existing uncorrectable errs */
747 1.2 matt r = rmixl_pcie_conf_read(v, tag, RMIXL_PCIE_ECFG_UESR);
748 1.2 matt r &= ~PCIE_ECFG_UExR_RESV;
749 1.2 matt if (r != 0)
750 1.2 matt panic("%s: Uncorrectable Error Status: %#x\n",
751 1.2 matt __func__, r);
752 1.2 matt
753 1.2 matt /* unmask all uncorrectable errs */
754 1.2 matt r = rmixl_pcie_conf_read(v, tag, RMIXL_PCIE_ECFG_UEMR);
755 1.2 matt r &= ~PCIE_ECFG_UExR_RESV;
756 1.2 matt rmixl_pcie_conf_write(v, tag, RMIXL_PCIE_ECFG_UEMR, r);
757 1.2 matt
 758 1.2 matt /* ensure default uncorrectable err severity configuration */
759 1.2 matt r = rmixl_pcie_conf_read(v, tag, RMIXL_PCIE_ECFG_UEVR);
760 1.2 matt r &= ~PCIE_ECFG_UExR_RESV;
761 1.2 matt r |= PCIE_ECFG_UEVR_DFLT;
762 1.2 matt rmixl_pcie_conf_write(v, tag, RMIXL_PCIE_ECFG_UEVR, r);
763 1.2 matt
764 1.2 matt /* check pre-existing correctable errs */
765 1.2 matt r = rmixl_pcie_conf_read(v, tag, RMIXL_PCIE_ECFG_CESR);
766 1.2 matt r &= ~PCIE_ECFG_CExR_RESV;
767 1.2 matt #ifdef DIAGNOSTIC
768 1.2 matt if (r != 0)
769 1.2 matt aprint_normal("%s: Correctable Error Status: %#x\n",
770 1.2 matt device_xname(sc->sc_dev), r);
771 1.2 matt #endif
772 1.2 matt
773 1.2 matt /* unmask all correctable errs */
774 1.2 matt r = rmixl_pcie_conf_read(v, tag, RMIXL_PCIE_ECFG_CEMR);
775 1.2 matt r &= ~PCIE_ECFG_CExR_RESV;
 776 1.2 matt rmixl_pcie_conf_write(v, tag, RMIXL_PCIE_ECFG_CEMR, r);
777 1.2 matt
778 1.2 matt /* check pre-existing Root Error Status */
779 1.2 matt r = rmixl_pcie_conf_read(v, tag, RMIXL_PCIE_ECFG_RESR);
780 1.2 matt r &= ~PCIE_ECFG_RESR_RESV;
781 1.2 matt if (r != 0)
782 1.2 matt panic("%s: Root Error Status: %#x\n", __func__, r);
783 1.2 matt /* XXX TMP FIXME */
784 1.2 matt
785 1.2 matt /* enable all Root errs */
786 1.2 matt r = (pcireg_t)(~PCIE_ECFG_RECR_RESV);
787 1.2 matt rmixl_pcie_conf_write(v, tag, RMIXL_PCIE_ECFG_RECR, r);
788 1.2 matt
789 1.3 matt /*
790 1.3 matt * establish ISR for PCIE Fatal Error interrupt
791 1.3 matt * - for XLS4xxLite, XLS2xx, XLS1xx only
792 1.3 matt */
793 1.3 matt switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
794 1.3 matt case MIPS_XLS104:
795 1.3 matt case MIPS_XLS108:
796 1.3 matt case MIPS_XLS204:
797 1.3 matt case MIPS_XLS208:
798 1.3 matt case MIPS_XLS404LITE:
799 1.3 matt case MIPS_XLS408LITE:
800 1.3 matt sc->sc_fatal_ih = rmixl_intr_establish(29, sc->sc_tmsk,
801 1.3 matt IPL_HIGH, RMIXL_TRIG_LEVEL, RMIXL_POLR_HIGH,
802 1.3 matt rmixl_pcie_error_intr, v, false);
803 1.3 matt break;
804 1.3 matt default:
805 1.3 matt break;
806 1.3 matt }
807 1.2 matt
808 1.2 matt #if defined(DEBUG) || defined(DDB)
809 1.2 matt rmixl_pcie_v = v;
810 1.2 matt #endif
811 1.2 matt }
812 1.2 matt
813 1.2 matt void
814 1.2 matt rmixl_conf_interrupt(void *v, int bus, int dev, int ipin, int swiz, int *iline)
815 1.2 matt {
816 1.2 matt DPRINTF(("%s: %p, %d, %d, %d, %d, %p\n",
817 1.2 matt __func__, v, bus, dev, ipin, swiz, iline));
818 1.2 matt }
819 1.2 matt
820 1.2 matt void
821 1.2 matt rmixl_pcie_attach_hook(struct device *parent, struct device *self,
822 1.2 matt struct pcibus_attach_args *pba)
823 1.2 matt {
824 1.2 matt DPRINTF(("%s: pba_bus %d, pba_bridgetag %p, pc_conf_v %p\n",
825 1.2 matt __func__, pba->pba_bus, pba->pba_bridgetag,
826 1.2 matt pba->pba_pc->pc_conf_v));
827 1.2 matt }
828 1.2 matt
829 1.2 matt int
830 1.2 matt rmixl_pcie_bus_maxdevs(void *v, int busno)
831 1.2 matt {
832 1.2 matt return (32); /* XXX depends on the family of XLS SoC */
833 1.2 matt }
834 1.2 matt
835 1.2 matt /*
836 1.2 matt * rmixl_tag_to_ecfg - convert cfg address (generic tag) to ecfg address
837 1.2 matt *
838 1.2 matt * 39:29 (reserved)
839 1.2 matt * 28 Swap (0=little, 1=big endian)
840 1.2 matt * 27:20 Bus number
841 1.2 matt * 19:15 Device number
842 1.2 matt * 14:12 Function number
843 1.2 matt * 11:8 Extended Register number
844 1.2 matt * 7:0 Register number
845 1.2 matt */
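/*
 * the conversion is a plain left shift by 4: bus 23:16 -> 27:20,
 * device 15:11 -> 19:15, function 10:8 -> 14:12; the low 8 register
 * bits of the incoming tag must be zero, as the KASSERT below checks
 */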
846 1.2 matt static pcitag_t
847 1.2 matt rmixl_tag_to_ecfg(pcitag_t tag)
848 1.2 matt {
849 1.2 matt KASSERT((tag & __BITS(7,0)) == 0);
850 1.2 matt return (tag << 4);
851 1.2 matt }
852 1.2 matt
853 1.2 matt /*
854 1.2 matt * XLS pci tag is a 40 bit address composed thusly:
855 1.2 matt * 39:25 (reserved)
856 1.2 matt * 24 Swap (0=little, 1=big endian)
857 1.2 matt * 23:16 Bus number
858 1.2 matt * 15:11 Device number
859 1.2 matt * 10:8 Function number
860 1.2 matt * 7:0 Register number
861 1.2 matt *
862 1.2 matt * Note: this is the "native" composition for addressing CFG space, but not for ECFG space.
863 1.2 matt */
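/* e.g. bus 1, device 2, function 3 yields tag 0x00011300 */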
864 1.2 matt pcitag_t
865 1.2 matt rmixl_pcie_make_tag(void *v, int bus, int dev, int fun)
866 1.2 matt {
867 1.2 matt return ((bus << 16) | (dev << 11) | (fun << 8));
868 1.2 matt }
869 1.2 matt
870 1.2 matt void
871 1.2 matt rmixl_pcie_decompose_tag(void *v, pcitag_t tag, int *bp, int *dp, int *fp)
872 1.2 matt {
873 1.2 matt if (bp != NULL)
874 1.2 matt *bp = (tag >> 16) & 0xff;
875 1.2 matt if (dp != NULL)
876 1.2 matt *dp = (tag >> 11) & 0x1f;
877 1.2 matt if (fp != NULL)
878 1.2 matt *fp = (tag >> 8) & 0x7;
879 1.2 matt }
880 1.2 matt
881 1.2 matt void
882 1.2 matt rmixl_pcie_tag_print(const char *restrict s, void *v, pcitag_t tag, int offset,
883 1.2 matt vaddr_t va, u_long r)
884 1.2 matt {
885 1.2 matt int bus, dev, fun;
886 1.2 matt
887 1.2 matt rmixl_pcie_decompose_tag(v, tag, &bus, &dev, &fun);
888 1.2 matt printf("%s: %d/%d/%d/%d - %#" PRIxVADDR ":%#lx\n",
889 1.2 matt s, bus, dev, fun, offset, va, r);
890 1.2 matt }
891 1.2 matt
892 1.2 matt static int
893 1.2 matt rmixl_pcie_conf_setup(struct rmixl_pcie_softc *sc,
894 1.2 matt pcitag_t tag, int *offp, bus_space_tag_t *bstp,
895 1.2 matt bus_space_handle_t *bshp)
896 1.2 matt {
897 1.2 matt struct rmixl_config *rcp = &rmixl_configuration;
898 1.2 matt bus_space_tag_t bst;
899 1.2 matt bus_space_handle_t bsh;
900 1.2 matt bus_size_t size;
901 1.2 matt pcitag_t mask;
902 1.2 matt bus_addr_t ba;
903 1.2 matt int err;
904 1.2 matt static bus_space_handle_t cfg_bsh;
905 1.2 matt static bus_addr_t cfg_oba = -1;
906 1.2 matt static bus_space_handle_t ecfg_bsh;
907 1.2 matt static bus_addr_t ecfg_oba = -1;
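/*
 * cfg_bsh/ecfg_bsh cache the most recently mapped 64KB CFG (or 32KB
 * ECFG) window so repeated accesses within the same window avoid a
 * bus_space_unmap/bus_space_map cycle; callers serialize us with
 * sc_mutex.
 */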
908 1.2 matt
909 1.2 matt /*
910 1.2 matt * bus space depends on offset
911 1.2 matt */
912 1.2 matt if ((*offp >= 0) && (*offp < 0x100)) {
913 1.2 matt mask = __BITS(15,0);
914 1.3 matt bst = sc->sc_pci_cfg_memt;
915 1.3 matt ba = rcp->rc_pci_cfg_pbase;
916 1.2 matt ba += (tag & ~mask);
917 1.2 matt *offp += (tag & mask);
918 1.2 matt if (ba != cfg_oba) {
919 1.2 matt size = (bus_size_t)(mask + 1);
920 1.2 matt if (cfg_oba != -1)
921 1.2 matt bus_space_unmap(bst, cfg_bsh, size);
922 1.2 matt err = bus_space_map(bst, ba, size, 0, &cfg_bsh);
923 1.2 matt if (err != 0) {
924 1.2 matt #ifdef DEBUG
925 1.2 matt panic("%s: bus_space_map err %d, CFG space",
926 1.2 matt __func__, err); /* XXX */
927 1.2 matt #endif
928 1.2 matt return -1;
929 1.2 matt }
930 1.2 matt cfg_oba = ba;
931 1.2 matt }
932 1.2 matt bsh = cfg_bsh;
933 1.2 matt } else if ((*offp >= 0x100) && (*offp <= 0x700)) {
934 1.2 matt mask = __BITS(14,0);
935 1.2 matt tag = rmixl_tag_to_ecfg(tag); /* convert to ECFG format */
936 1.3 matt bst = sc->sc_pci_ecfg_memt;
937 1.3 matt ba = rcp->rc_pci_ecfg_pbase;
938 1.2 matt ba += (tag & ~mask);
939 1.2 matt *offp += (tag & mask);
940 1.2 matt if (ba != ecfg_oba) {
941 1.2 matt size = (bus_size_t)(mask + 1);
942 1.2 matt if (ecfg_oba != -1)
943 1.2 matt bus_space_unmap(bst, ecfg_bsh, size);
944 1.2 matt err = bus_space_map(bst, ba, size, 0, &ecfg_bsh);
945 1.2 matt if (err != 0) {
946 1.3 matt #ifdef DEBUG
947 1.2 matt panic("%s: bus_space_map err %d, ECFG space",
948 1.2 matt __func__, err); /* XXX */
949 1.2 matt #endif
950 1.2 matt return -1;
951 1.2 matt }
952 1.2 matt ecfg_oba = ba;
953 1.2 matt }
954 1.2 matt bsh = ecfg_bsh;
955 1.2 matt } else {
956 1.2 matt #ifdef DEBUG
957 1.2 matt panic("%s: offset %#x: unknown", __func__, *offp);
958 1.2 matt #endif
959 1.2 matt return -1;
960 1.2 matt }
961 1.2 matt
962 1.2 matt *bstp = bst;
963 1.2 matt *bshp = bsh;
964 1.2 matt
965 1.2 matt return 0;
966 1.2 matt }
967 1.2 matt
968 1.2 matt pcireg_t
969 1.2 matt rmixl_pcie_conf_read(void *v, pcitag_t tag, int offset)
970 1.2 matt {
971 1.2 matt struct rmixl_pcie_softc *sc = v;
972 1.2 matt static bus_space_handle_t bsh;
973 1.2 matt bus_space_tag_t bst;
974 1.2 matt pcireg_t rv;
975 1.2 matt uint64_t cfg0;
976 1.2 matt
977 1.3 matt mutex_enter(&sc->sc_mutex);
978 1.2 matt
979 1.2 matt if (rmixl_pcie_conf_setup(sc, tag, &offset, &bst, &bsh) == 0) {
980 1.2 matt cfg0 = rmixl_cache_err_dis();
981 1.2 matt rv = bus_space_read_4(bst, bsh, (bus_size_t)offset);
982 1.2 matt if (rmixl_cache_err_check() != 0) {
983 1.2 matt #ifdef DIAGNOSTIC
984 1.2 matt int bus, dev, fun;
985 1.2 matt
986 1.2 matt rmixl_pcie_decompose_tag(v, tag, &bus, &dev, &fun);
987 1.2 matt printf("%s: %d/%d/%d, offset %#x: bad address\n",
988 1.2 matt __func__, bus, dev, fun, offset);
989 1.2 matt #endif
990 1.2 matt rv = (pcireg_t) -1;
991 1.2 matt }
992 1.2 matt rmixl_cache_err_restore(cfg0);
993 1.2 matt } else {
994 1.2 matt rv = -1;
995 1.2 matt }
996 1.2 matt
997 1.3 matt mutex_exit(&sc->sc_mutex);
998 1.3 matt
999 1.2 matt return rv;
1000 1.2 matt }
1001 1.2 matt
1002 1.2 matt void
1003 1.2 matt rmixl_pcie_conf_write(void *v, pcitag_t tag, int offset, pcireg_t val)
1004 1.2 matt {
1005 1.2 matt struct rmixl_pcie_softc *sc = v;
1006 1.2 matt static bus_space_handle_t bsh;
1007 1.2 matt bus_space_tag_t bst;
1008 1.2 matt uint64_t cfg0;
1009 1.2 matt
1010 1.3 matt mutex_enter(&sc->sc_mutex);
1011 1.2 matt
1012 1.2 matt if (rmixl_pcie_conf_setup(sc, tag, &offset, &bst, &bsh) == 0) {
1013 1.2 matt cfg0 = rmixl_cache_err_dis();
1014 1.2 matt bus_space_write_4(bst, bsh, (bus_size_t)offset, val);
1015 1.2 matt if (rmixl_cache_err_check() != 0) {
1016 1.2 matt #ifdef DIAGNOSTIC
1017 1.2 matt int bus, dev, fun;
1018 1.2 matt
1019 1.2 matt rmixl_pcie_decompose_tag(v, tag, &bus, &dev, &fun);
1020 1.2 matt printf("%s: %d/%d/%d, offset %#x: bad address\n",
1021 1.2 matt __func__, bus, dev, fun, offset);
1022 1.2 matt #endif
1023 1.2 matt }
1024 1.2 matt rmixl_cache_err_restore(cfg0);
1025 1.2 matt }
1026 1.2 matt
1027 1.3 matt mutex_exit(&sc->sc_mutex);
1028 1.2 matt }
1029 1.2 matt
1030 1.2 matt int
1031 1.4 dyoung rmixl_pcie_intr_map(const struct pci_attach_args *pa, pci_intr_handle_t *pih)
1032 1.2 matt {
1033 1.3 matt int device;
1034 1.3 matt u_int link;
1035 1.2 matt u_int irq;
1036 1.2 matt
1037 1.3 matt /*
1038 1.3 matt * The bus is unimportant since it can change depending on the
 1039 1.3 matt * configuration. We are tied to the device # of the PCIe bridge
 1040 1.3 matt * we are ultimately attached to.
1041 1.3 matt */
1042 1.3 matt pci_decompose_tag(pa->pa_pc, pa->pa_intrtag,
1043 1.3 matt NULL, &device, NULL);
1044 1.3 matt
1045 1.2 matt #ifdef DEBUG
 1046 1.2 matt DPRINTF(("%s: pa_bus %d, pa_intrswiz %#x, pa_intrtag %#lx,"
1047 1.2 matt " pa_intrpin %d, pa_intrline %d, pa_rawintrpin %d\n",
1048 1.2 matt __func__, pa->pa_bus, pa->pa_intrswiz, pa->pa_intrtag,
1049 1.2 matt pa->pa_intrpin, pa->pa_intrline, pa->pa_rawintrpin));
1050 1.2 matt #endif
1051 1.2 matt
1052 1.2 matt /*
1053 1.3 matt * PCIe Link INT irq assignment is cpu implementation specific
1054 1.2 matt */
1055 1.3 matt switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
1056 1.3 matt case MIPS_XLS104:
1057 1.3 matt case MIPS_XLS108:
1058 1.3 matt case MIPS_XLS404LITE:
1059 1.2 matt case MIPS_XLS408LITE:
1060 1.3 matt if (device > 1)
1061 1.3 matt panic("%s: bad bus %d", __func__, device);
1062 1.3 matt link = device;
1063 1.3 matt irq = device + 26;
1064 1.3 matt break;
1065 1.3 matt case MIPS_XLS204:
1066 1.3 matt case MIPS_XLS208: {
1067 1.3 matt if (device > 3)
1068 1.3 matt panic("%s: bad bus %d", __func__, device);
1069 1.3 matt link = device;
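/* XLS2xx: links 0 and 1 take irqs 26 and 27, links 2 and 3 take irqs 23 and 24 */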
1070 1.3 matt irq = device + (device & 2 ? 21 : 26);
1071 1.2 matt break;
1072 1.3 matt }
1073 1.3 matt case MIPS_XLS404:
1074 1.3 matt case MIPS_XLS408:
1075 1.2 matt case MIPS_XLS416:
1076 1.3 matt case MIPS_XLS608:
1077 1.2 matt case MIPS_XLS616:
1078 1.3 matt if (device > 3)
1079 1.3 matt panic("%s: bad bus %d", __func__, device);
1080 1.3 matt link = device;
1081 1.3 matt irq = device + 26;
1082 1.2 matt break;
1083 1.2 matt default:
1084 1.2 matt panic("%s: cpu IMPL %#x not supported\n",
1085 1.3 matt __func__, MIPS_PRID_IMPL(mips_options.mips_cpu_id));
1086 1.2 matt }
1087 1.2 matt
1088 1.3 matt if (pa->pa_intrpin != PCI_INTERRUPT_PIN_NONE)
1089 1.3 matt *pih = rmixl_pcie_make_pih(link, pa->pa_intrpin - 1, irq);
1090 1.3 matt else
1091 1.3 matt *pih = ~0;
1092 1.2 matt
1093 1.2 matt return 0;
1094 1.2 matt }
1095 1.2 matt
1096 1.2 matt const char *
1097 1.2 matt rmixl_pcie_intr_string(void *v, pci_intr_handle_t pih)
1098 1.2 matt {
1099 1.2 matt const char *name = "(illegal)";
1100 1.3 matt u_int link, bitno, irq;
1101 1.2 matt
1102 1.3 matt rmixl_pcie_decompose_pih(pih, &link, &bitno, &irq);
1103 1.3 matt
1104 1.3 matt switch (MIPS_PRID_IMPL(mips_options.mips_cpu_id)) {
1105 1.3 matt case MIPS_XLS104:
1106 1.3 matt case MIPS_XLS108:
1107 1.3 matt case MIPS_XLS404LITE:
1108 1.2 matt case MIPS_XLS408LITE:
1109 1.2 matt switch (irq) {
1110 1.2 matt case 26:
1111 1.2 matt case 27:
1112 1.5 cliff name = rmixl_intr_string(RMIXL_IRT_VECTOR(irq));
1113 1.2 matt break;
1114 1.2 matt }
1115 1.2 matt break;
1116 1.3 matt case MIPS_XLS204:
1117 1.3 matt case MIPS_XLS208:
1118 1.3 matt switch (irq) {
1119 1.3 matt case 23:
1120 1.3 matt case 24:
1121 1.3 matt case 26:
1122 1.3 matt case 27:
1123 1.5 cliff name = rmixl_intr_string(RMIXL_IRT_VECTOR(irq));
1124 1.3 matt break;
1125 1.3 matt }
1126 1.3 matt break;
1127 1.3 matt case MIPS_XLS404:
1128 1.3 matt case MIPS_XLS408:
1129 1.3 matt case MIPS_XLS416:
1130 1.3 matt case MIPS_XLS608:
1131 1.2 matt case MIPS_XLS616:
1132 1.2 matt switch (irq) {
1133 1.2 matt case 26:
1134 1.2 matt case 27:
1135 1.2 matt case 28:
1136 1.2 matt case 29:
1137 1.5 cliff name = rmixl_intr_string(RMIXL_IRT_VECTOR(irq));
1138 1.2 matt break;
1139 1.2 matt }
1140 1.2 matt break;
1141 1.3 matt default:
1142 1.3 matt panic("%s: cpu IMPL %#x not supported\n",
1143 1.3 matt __func__, MIPS_PRID_IMPL(mips_options.mips_cpu_id));
1144 1.2 matt }
1145 1.2 matt
1146 1.2 matt return name;
1147 1.2 matt }
1148 1.2 matt
1149 1.2 matt const struct evcnt *
1150 1.2 matt rmixl_pcie_intr_evcnt(void *v, pci_intr_handle_t pih)
1151 1.2 matt {
1152 1.2 matt return NULL;
1153 1.2 matt }
1154 1.2 matt
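/*
 * pci_intr_handle_t encoding used here (see make/decompose below):
 *	bits  3:0	PCIe link number
 *	bits  9:4	interrupt bit number (INTx pin - 1)
 *	bits 14:10	IRT irq number (asserted < 32)
 */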
1155 1.3 matt static pci_intr_handle_t
1156 1.3 matt rmixl_pcie_make_pih(u_int link, u_int bitno, u_int irq)
1157 1.3 matt {
1158 1.3 matt pci_intr_handle_t pih;
1159 1.3 matt
1160 1.3 matt KASSERT(link < RMIXL_PCIE_NLINKS_MAX);
1161 1.3 matt KASSERT(bitno < 64);
1162 1.3 matt KASSERT(irq < 32);
1163 1.3 matt
1164 1.3 matt pih = (irq << 10);
1165 1.3 matt pih |= (bitno << 4);
1166 1.3 matt pih |= link;
1167 1.3 matt
1168 1.3 matt return pih;
1169 1.3 matt }
1170 1.3 matt
1171 1.3 matt static void
1172 1.3 matt rmixl_pcie_decompose_pih(pci_intr_handle_t pih, u_int *link, u_int *bitno, u_int *irq)
1173 1.3 matt {
1174 1.3 matt *link = (u_int)(pih & 0xf);
1175 1.3 matt *bitno = (u_int)((pih >> 4) & 0x3f);
1176 1.3 matt *irq = (u_int)(pih >> 10);
1177 1.3 matt
1178 1.3 matt KASSERT(*link < RMIXL_PCIE_NLINKS_MAX);
1179 1.3 matt KASSERT(*bitno < 64);
1180 1.3 matt KASSERT(*irq < 32);
1181 1.3 matt }
1182 1.3 matt
1183 1.3 matt static void
1184 1.3 matt rmixl_pcie_intr_disestablish(void *v, void *ih)
1185 1.2 matt {
1186 1.3 matt rmixl_pcie_softc_t *sc = v;
1187 1.3 matt rmixl_pcie_link_dispatch_t *dip = ih;
1188 1.3 matt rmixl_pcie_link_intr_t *lip = sc->sc_link_intr[dip->link];
1189 1.3 matt uint32_t r;
1190 1.3 matt uint32_t bit;
1191 1.3 matt u_int offset;
1192 1.3 matt u_int other;
1193 1.3 matt bool busy;
1194 1.3 matt
1195 1.3 matt DPRINTF(("%s: link=%d pin=%d irq=%d\n",
1196 1.3 matt __func__, dip->link, dip->bitno + 1, dip->irq));
1197 1.3 matt
1198 1.3 matt mutex_enter(&sc->sc_mutex);
1199 1.3 matt
1200 1.3 matt dip->func = NULL; /* mark unused, prevent further dispatch */
1201 1.3 matt
1202 1.3 matt /*
1203 1.3 matt * if no other dispatch handle is using this interrupt,
1204 1.3 matt * we can disable it
1205 1.3 matt */
1206 1.3 matt busy = false;
1207 1.3 matt for (int i=0; i < lip->dispatch_count; i++) {
1208 1.3 matt rmixl_pcie_link_dispatch_t *d = &lip->dispatch_data[i];
1209 1.3 matt if (d == dip)
1210 1.3 matt continue;
1211 1.3 matt if (d->bitno == dip->bitno) {
1212 1.3 matt busy = true;
1213 1.3 matt break;
1214 1.3 matt }
1215 1.3 matt }
1216 1.3 matt if (! busy) {
1217 1.3 matt if (dip->bitno < 32) {
1218 1.3 matt bit = 1 << dip->bitno;
1219 1.3 matt offset = int_enb_offset[dip->link].r0;
1220 1.3 matt other = int_enb_offset[dip->link].r1;
1221 1.3 matt } else {
1222 1.3 matt bit = 1 << (dip->bitno - 32);
1223 1.3 matt offset = int_enb_offset[dip->link].r1;
1224 1.3 matt other = int_enb_offset[dip->link].r0;
1225 1.3 matt }
1226 1.3 matt
1227 1.3 matt /* disable this interrupt in the PCIe bridge */
1228 1.3 matt r = RMIXL_IOREG_READ(RMIXL_IO_DEV_PCIE_LE + offset);
1229 1.3 matt r &= ~bit;
1230 1.3 matt RMIXL_IOREG_WRITE(RMIXL_IO_DEV_PCIE_LE + offset, r);
1231 1.3 matt
1232 1.3 matt /*
1233 1.3 matt * if both ENABLE0 and ENABLE1 are 0
1234 1.3 matt * disable the link interrupt
1235 1.3 matt */
1236 1.3 matt if (r == 0) {
1237 1.3 matt /* check the other reg */
1238 1.3 matt if (RMIXL_IOREG_READ(RMIXL_IO_DEV_PCIE_LE + other) == 0) {
1239 1.3 matt DPRINTF(("%s: disable link %d\n", __func__, lip->link));
1240 1.3 matt
1241 1.3 matt /* tear down interrupt on this link */
1242 1.3 matt rmixl_intr_disestablish(lip->ih);
1243 1.3 matt
1244 1.3 matt /* commit NULL interrupt set */
1245 1.3 matt sc->sc_link_intr[dip->link] = NULL;
1246 1.3 matt
1247 1.3 matt /* schedule delayed free of the old link interrupt set */
1248 1.3 matt rmixl_pcie_lip_free_callout(lip);
1249 1.3 matt }
1250 1.3 matt }
1251 1.3 matt }
1252 1.3 matt
1253 1.3 matt mutex_exit(&sc->sc_mutex);
1254 1.2 matt }
1255 1.2 matt
1256 1.2 matt static void *
1257 1.2 matt rmixl_pcie_intr_establish(void *v, pci_intr_handle_t pih, int ipl,
1258 1.3 matt int (*func)(void *), void *arg)
1259 1.2 matt {
1260 1.3 matt rmixl_pcie_softc_t *sc = v;
1261 1.3 matt u_int link, bitno, irq;
1262 1.3 matt uint32_t r;
1263 1.3 matt rmixl_pcie_link_intr_t *lip;
1264 1.3 matt rmixl_pcie_link_dispatch_t *dip = NULL;
1265 1.3 matt uint32_t bit;
1266 1.3 matt u_int offset;
1267 1.3 matt
1268 1.3 matt if (pih == ~0) {
1269 1.3 matt DPRINTF(("%s: bad pih=%#lx, implies PCI_INTERRUPT_PIN_NONE\n",
1270 1.3 matt __func__, pih));
1271 1.3 matt return NULL;
1272 1.3 matt }
1273 1.3 matt
1274 1.3 matt rmixl_pcie_decompose_pih(pih, &link, &bitno, &irq);
1275 1.3 matt DPRINTF(("%s: link=%d pin=%d irq=%d\n",
1276 1.3 matt __func__, link, bitno + 1, irq));
1277 1.3 matt
1278 1.3 matt mutex_enter(&sc->sc_mutex);
1279 1.3 matt
1280 1.3 matt lip = rmixl_pcie_lip_add_1(sc, link, irq, ipl);
1281 1.3 matt if (lip == NULL)
1282 1.3 matt return NULL;
1283 1.3 matt
1284 1.3 matt /*
 1285 1.3 matt * initialize our new interrupt, the last element in dispatch_data[]
1286 1.3 matt */
1287 1.3 matt dip = &lip->dispatch_data[lip->dispatch_count - 1];
1288 1.3 matt dip->link = link;
1289 1.3 matt dip->bitno = bitno;
1290 1.3 matt dip->irq = irq;
1291 1.3 matt dip->func = func;
1292 1.3 matt dip->arg = arg;
1293 1.3 matt dip->counts = RMIXL_PCIE_EVCNT(sc, link, bitno, 0);
1294 1.3 matt
1295 1.3 matt if (bitno < 32) {
1296 1.3 matt offset = int_enb_offset[link].r0;
1297 1.3 matt bit = 1 << bitno;
1298 1.3 matt } else {
1299 1.3 matt offset = int_enb_offset[link].r1;
1300 1.3 matt bit = 1 << (bitno - 32);
1301 1.3 matt }
1302 1.3 matt
1303 1.3 matt /* commit the new link interrupt set */
1304 1.3 matt sc->sc_link_intr[link] = lip;
1305 1.3 matt
1306 1.3 matt /* enable this interrupt in the PCIe bridge */
1307 1.3 matt r = RMIXL_IOREG_READ(RMIXL_IO_DEV_PCIE_LE + offset);
1308 1.3 matt r |= bit;
1309 1.3 matt RMIXL_IOREG_WRITE(RMIXL_IO_DEV_PCIE_LE + offset, r);
1310 1.3 matt
1311 1.3 matt mutex_exit(&sc->sc_mutex);
1312 1.3 matt return dip;
1313 1.3 matt }
1314 1.3 matt
1315 1.3 matt rmixl_pcie_link_intr_t *
1316 1.3 matt rmixl_pcie_lip_add_1(rmixl_pcie_softc_t *sc, u_int link, int irq, int ipl)
1317 1.3 matt {
1318 1.3 matt rmixl_pcie_link_intr_t *lip_old = sc->sc_link_intr[link];
1319 1.3 matt rmixl_pcie_link_intr_t *lip_new;
1320 1.3 matt u_int dispatch_count;
1321 1.3 matt size_t size;
1322 1.3 matt
1323 1.3 matt dispatch_count = 1;
1324 1.3 matt size = sizeof(rmixl_pcie_link_intr_t);
1325 1.3 matt if (lip_old != NULL) {
1326 1.3 matt /*
 1327 1.3 matt * count only those dispatch elements still in use;
 1328 1.3 matt * unused ones will be pruned during the copy,
 1329 1.3 matt * i.e. we are "lazy": there is no rmixl_pcie_lip_sub_1
1330 1.3 matt */
1331 1.3 matt for (int i=0; i < lip_old->dispatch_count; i++) {
1332 1.3 matt if (lip_old->dispatch_data[i].func != NULL) {
1333 1.3 matt dispatch_count++;
1334 1.3 matt size += sizeof(rmixl_pcie_link_intr_t);
1335 1.3 matt }
1336 1.3 matt }
1337 1.3 matt }
1338 1.3 matt
1339 1.3 matt /*
1340 1.3 matt * allocate and initialize link intr struct
1341 1.3 matt * with one or more dispatch handles
1342 1.3 matt */
1343 1.3 matt lip_new = malloc(size, M_DEVBUF, M_NOWAIT);
1344 1.3 matt if (lip_new == NULL) {
1345 1.3 matt #ifdef DIAGNOSTIC
1346 1.3 matt printf("%s: cannot malloc\n", __func__);
1347 1.3 matt #endif
1348 1.3 matt return NULL;
1349 1.3 matt }
1350 1.3 matt
1351 1.3 matt if (lip_old == NULL) {
1352 1.3 matt /* initialize the link interrupt struct */
1353 1.3 matt lip_new->sc = sc;
1354 1.3 matt lip_new->link = link;
1355 1.3 matt lip_new->ipl = ipl;
1356 1.3 matt lip_new->ih = rmixl_intr_establish(irq, sc->sc_tmsk,
1357 1.3 matt ipl, RMIXL_TRIG_LEVEL, RMIXL_POLR_HIGH,
1358 1.3 matt rmixl_pcie_intr, lip_new, false);
1359 1.3 matt if (lip_new->ih == NULL)
1360 1.3 matt panic("%s: cannot establish irq %d", __func__, irq);
1361 1.3 matt } else {
1362 1.3 matt /*
1363 1.3 matt * all intrs on a link get same ipl and sc
1364 1.3 matt * first intr established sets the standard
1365 1.3 matt */
1366 1.3 matt KASSERT(sc == lip_old->sc);
1367 1.3 matt if (sc != lip_old->sc) {
1368 1.3 matt printf("%s: sc %p mismatch\n", __func__, sc);
1369 1.3 matt free(lip_new, M_DEVBUF);
1370 1.3 matt return NULL;
1371 1.3 matt }
1372 1.3 matt KASSERT (ipl == lip_old->ipl);
1373 1.3 matt if (ipl != lip_old->ipl) {
1374 1.3 matt printf("%s: ipl %d mismatch\n", __func__, ipl);
1375 1.3 matt free(lip_new, M_DEVBUF);
1376 1.3 matt return NULL;
1377 1.3 matt }
1378 1.3 matt /*
 1379 1.3 matt * copy lip_old to lip_new, skipping unused dispatch elements
1380 1.3 matt */
1381 1.3 matt memcpy(lip_new, lip_old, sizeof(rmixl_pcie_link_intr_t));
1382 1.3 matt for (int j=0, i=0; i < lip_old->dispatch_count; i++) {
1383 1.3 matt if (lip_old->dispatch_data[i].func != NULL) {
1384 1.3 matt memcpy(&lip_new->dispatch_data[j],
1385 1.3 matt &lip_old->dispatch_data[i],
1386 1.3 matt sizeof(rmixl_pcie_link_dispatch_t));
1387 1.3 matt j++;
1388 1.3 matt }
1389 1.3 matt }
1390 1.3 matt
1391 1.3 matt /*
1392 1.3 matt * schedule delayed free of old link interrupt set
1393 1.3 matt */
1394 1.3 matt rmixl_pcie_lip_free_callout(lip_old);
1395 1.3 matt }
1396 1.3 matt lip_new->dispatch_count = dispatch_count;
1397 1.3 matt
1398 1.3 matt return lip_new;
1399 1.3 matt }
1400 1.3 matt
1401 1.3 matt /*
1402 1.3 matt * delay free of the old link interrupt set
1403 1.3 matt * to allow anyone still using it to do so safely
1404 1.3 matt * XXX 2 seconds should be plenty?
1405 1.3 matt */
1406 1.3 matt static void
1407 1.3 matt rmixl_pcie_lip_free_callout(rmixl_pcie_link_intr_t *lip)
1408 1.3 matt {
1409 1.3 matt callout_init(&lip->callout, 0);
1410 1.3 matt callout_reset(&lip->callout, 2 * hz, rmixl_pcie_lip_free, lip);
1411 1.2 matt }
1412 1.2 matt
1413 1.2 matt static void
1414 1.3 matt rmixl_pcie_lip_free(void *arg)
1415 1.3 matt {
1416 1.3 matt rmixl_pcie_link_intr_t *lip = arg;
1417 1.3 matt
1418 1.3 matt callout_destroy(&lip->callout);
1419 1.3 matt free(lip, M_DEVBUF);
1420 1.3 matt }
1421 1.3 matt
1422 1.3 matt static int
1423 1.3 matt rmixl_pcie_intr(void *arg)
1424 1.3 matt {
1425 1.3 matt rmixl_pcie_link_intr_t *lip = arg;
1426 1.3 matt u_int link = lip->link;
1427 1.3 matt int rv = 0;
1428 1.3 matt
1429 1.3 matt uint32_t status0 = RMIXL_IOREG_READ(RMIXL_IO_DEV_PCIE_LE + int_sts_offset[link].r0);
1430 1.3 matt uint32_t status1 = RMIXL_IOREG_READ(RMIXL_IO_DEV_PCIE_LE + int_sts_offset[link].r1);
1431 1.3 matt uint64_t status = ((uint64_t)status1 << 32) | status0;
1432 1.3 matt DPRINTF(("%s: %d:%#"PRIx64"\n", __func__, link, status));
1433 1.3 matt
1434 1.3 matt if (status != 0) {
1435 1.3 matt rmixl_pcie_link_dispatch_t *dip;
1436 1.3 matt
1437 1.3 matt if (status & RMIXL_PCIE_LINK_STATUS_ERRORS)
1438 1.3 matt rmixl_pcie_link_error_intr(link, status0, status1);
1439 1.3 matt
1440 1.3 matt for (u_int i=0; i < lip->dispatch_count; i++) {
1441 1.3 matt dip = &lip->dispatch_data[i];
1442 1.3 matt int (*func)(void *) = dip->func;
1443 1.3 matt if (func != NULL) {
 1444 1.3 matt uint64_t bit = 1ULL << dip->bitno;
1445 1.3 matt if ((status & bit) != 0) {
1446 1.3 matt (void)(*func)(dip->arg);
1447 1.3 matt dip->counts[cpu_index(curcpu())].evcnt.ev_count++;
1448 1.3 matt rv = 1;
1449 1.3 matt }
1450 1.3 matt }
1451 1.3 matt }
1452 1.3 matt }
1453 1.3 matt
1454 1.3 matt return rv;
1455 1.3 matt }
1456 1.3 matt
1457 1.3 matt static void
1458 1.3 matt rmixl_pcie_link_error_intr(u_int link, uint32_t status0, uint32_t status1)
1459 1.2 matt {
1460 1.3 matt printf("%s: mask %#"PRIx64"\n",
1461 1.3 matt __func__, RMIXL_PCIE_LINK_STATUS_ERRORS);
1462 1.3 matt printf("%s: PCIe Link Error: link=%d status0=%#x status1=%#x\n",
1463 1.3 matt __func__, link, status0, status1);
1464 1.3 matt #if defined(DDB) && defined(DEBUG)
1465 1.3 matt Debugger();
1466 1.3 matt #endif
1467 1.2 matt }
1468 1.2 matt
1469 1.2 matt #if defined(DEBUG) || defined(DDB)
1470 1.2 matt /* this function exists to facilitate call from ddb */
1471 1.2 matt int
1472 1.2 matt rmixl_pcie_error_check(void)
1473 1.2 matt {
1474 1.2 matt if (rmixl_pcie_v != 0)
1475 1.2 matt return _rmixl_pcie_error_check(rmixl_pcie_v);
1476 1.2 matt return -1;
1477 1.2 matt }
1478 1.2 matt #endif
1479 1.2 matt
1480 1.2 matt STATIC int
1481 1.2 matt _rmixl_pcie_error_check(void *v)
1482 1.2 matt {
1483 1.2 matt int i, offset;
1484 1.2 matt pcireg_t r;
1485 1.2 matt pcitag_t tag;
1486 1.2 matt int err=0;
1487 1.2 matt #ifdef DIAGNOSTIC
1488 1.2 matt pcireg_t regs[PCIE_ECFG_ERRS_OFFTAB_NENTRIES];
1489 1.2 matt #endif
1490 1.2 matt
1491 1.2 matt tag = rmixl_pcie_make_tag(v, 0, 0, 0); /* XXX */
1492 1.2 matt
1493 1.2 matt for (i=0; i < PCIE_ECFG_ERRS_OFFTAB_NENTRIES; i++) {
1494 1.2 matt offset = pcie_ecfg_errs_tab[i].offset;
1495 1.2 matt r = rmixl_pcie_conf_read(v, tag, offset);
1496 1.2 matt #ifdef DIAGNOSTIC
1497 1.2 matt regs[i] = r;
1498 1.2 matt #endif
1499 1.2 matt if (r != 0) {
1500 1.2 matt pcireg_t rw1c = r & pcie_ecfg_errs_tab[i].rw1c;
1501 1.2 matt if (rw1c != 0) {
1502 1.2 matt /* attempt to clear the error */
1503 1.2 matt rmixl_pcie_conf_write(v, tag, offset, rw1c);
1504 1.2 matt };
1505 1.2 matt if (offset == RMIXL_PCIE_ECFG_CESR)
1506 1.2 matt err |= 1; /* correctable */
1507 1.2 matt else
1508 1.2 matt err |= 2; /* uncorrectable */
1509 1.2 matt }
1510 1.2 matt }
1511 1.2 matt #ifdef DIAGNOSTIC
1512 1.2 matt if (err != 0) {
1513 1.2 matt for (i=0; i < PCIE_ECFG_ERRS_OFFTAB_NENTRIES; i++) {
1514 1.2 matt offset = pcie_ecfg_errs_tab[i].offset;
1515 1.2 matt printf("%s: %#x: %#x\n", __func__, offset, regs[i]);
1516 1.2 matt }
1517 1.2 matt }
1518 1.2 matt #endif
1519 1.2 matt
1520 1.2 matt return err;
1521 1.2 matt }
1522 1.2 matt
1523 1.2 matt static int
1524 1.2 matt rmixl_pcie_error_intr(void *v)
1525 1.2 matt {
1526 1.2 matt if (_rmixl_pcie_error_check(v) < 2)
1527 1.2 matt return 0; /* correctable */
1528 1.2 matt
1529 1.2 matt /* uncorrectable */
1530 1.2 matt #if DDB
1531 1.2 matt Debugger();
1532 1.2 matt #endif
1533 1.2 matt
1534 1.2 matt /* XXX reset and recover? */
1535 1.2 matt
1536 1.2 matt panic("%s\n", __func__);
1537 1.2 matt }
1538 1.3 matt
1539 1.3 matt /*
1540 1.3 matt * rmixl_physaddr_init_pcie:
1541 1.3 matt * called from rmixl_physaddr_init to get region addrs & sizes
1542 1.3 matt * from PCIE CFG, ECFG, IO, MEM BARs
1543 1.3 matt */
1544 1.3 matt void
1545 1.3 matt rmixl_physaddr_init_pcie(struct extent *ext)
1546 1.3 matt {
1547 1.3 matt u_long base;
1548 1.3 matt u_long size;
1549 1.3 matt uint32_t r;
1550 1.3 matt
1551 1.3 matt r = RMIXL_IOREG_READ(RMIXLS_SBC_PCIE_CFG_BAR);
1552 1.3 matt if ((r & RMIXL_PCIE_CFG_BAR_ENB) != 0) {
1553 1.3 matt base = (u_long)(RMIXL_PCIE_CFG_BAR_TO_BA((uint64_t)r)
1554 1.3 matt / (1024 * 1024));
1555 1.3 matt size = (u_long)RMIXL_PCIE_CFG_SIZE / (1024 * 1024);
1556 1.3 matt DPRINTF(("%s: %d: %s: 0x%08x -- 0x%010lx:%ld MB\n", __func__,
1557 1.3 matt __LINE__, "CFG", r, base * 1024 * 1024, size));
1558 1.3 matt if (extent_alloc_region(ext, base, size, EX_NOWAIT) != 0)
1559 1.3 matt panic("%s: extent_alloc_region(%p, %#lx, %#lx, %#x) "
1560 1.3 matt "failed", __func__, ext, base, size, EX_NOWAIT);
1561 1.3 matt }
1562 1.3 matt
1563 1.3 matt r = RMIXL_IOREG_READ(RMIXLS_SBC_PCIE_ECFG_BAR);
1564 1.3 matt if ((r & RMIXL_PCIE_ECFG_BAR_ENB) != 0) {
1565 1.3 matt base = (u_long)(RMIXL_PCIE_ECFG_BAR_TO_BA((uint64_t)r)
1566 1.3 matt / (1024 * 1024));
1567 1.3 matt size = (u_long)RMIXL_PCIE_ECFG_SIZE / (1024 * 1024);
1568 1.3 matt DPRINTF(("%s: %d: %s: 0x%08x -- 0x%010lx:%ld MB\n", __func__,
1569 1.3 matt __LINE__, "ECFG", r, base * 1024 * 1024, size));
1570 1.3 matt if (extent_alloc_region(ext, base, size, EX_NOWAIT) != 0)
1571 1.3 matt panic("%s: extent_alloc_region(%p, %#lx, %#lx, %#x) "
1572 1.3 matt "failed", __func__, ext, base, size, EX_NOWAIT);
1573 1.3 matt }
1574 1.3 matt
1575 1.3 matt r = RMIXL_IOREG_READ(RMIXLS_SBC_PCIE_MEM_BAR);
1576 1.3 matt if ((r & RMIXL_PCIE_MEM_BAR_ENB) != 0) {
1577 1.3 matt base = (u_long)(RMIXL_PCIE_MEM_BAR_TO_BA((uint64_t)r)
1578 1.3 matt / (1024 * 1024));
1579 1.3 matt size = (u_long)(RMIXL_PCIE_MEM_BAR_TO_SIZE((uint64_t)r)
1580 1.3 matt / (1024 * 1024));
1581 1.3 matt DPRINTF(("%s: %d: %s: 0x%08x -- 0x%010lx:%ld MB\n", __func__,
1582 1.3 matt __LINE__, "MEM", r, base * 1024 * 1024, size));
1583 1.3 matt if (extent_alloc_region(ext, base, size, EX_NOWAIT) != 0)
1584 1.3 matt panic("%s: extent_alloc_region(%p, %#lx, %#lx, %#x) "
1585 1.3 matt "failed", __func__, ext, base, size, EX_NOWAIT);
1586 1.3 matt }
1587 1.3 matt
1588 1.3 matt r = RMIXL_IOREG_READ(RMIXLS_SBC_PCIE_IO_BAR);
1589 1.3 matt if ((r & RMIXL_PCIE_IO_BAR_ENB) != 0) {
1590 1.3 matt base = (u_long)(RMIXL_PCIE_IO_BAR_TO_BA((uint64_t)r)
1591 1.3 matt / (1024 * 1024));
1592 1.3 matt size = (u_long)(RMIXL_PCIE_IO_BAR_TO_SIZE((uint64_t)r)
1593 1.3 matt / (1024 * 1024));
1594 1.3 matt DPRINTF(("%s: %d: %s: 0x%08x -- 0x%010lx:%ld MB\n", __func__,
1595 1.3 matt __LINE__, "IO", r, base * 1024 * 1024, size));
1596 1.3 matt if (extent_alloc_region(ext, base, size, EX_NOWAIT) != 0)
1597 1.3 matt panic("%s: extent_alloc_region(%p, %#lx, %#lx, %#x) "
1598 1.3 matt "failed", __func__, ext, base, size, EX_NOWAIT);
1599 1.3 matt }
1600 1.3 matt }
1601