acpi_pci_layerscape_gen4.c revision 1.3 1 /* $NetBSD: acpi_pci_layerscape_gen4.c,v 1.3 2020/06/15 18:57:39 ad Exp $ */
2
3 /*-
4 * Copyright (c) 2020 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jared McNeill <jmcneill (at) invisible.ca>.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * NXP Layerscape PCIe Gen4 controller (not ECAM compliant)
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: acpi_pci_layerscape_gen4.c,v 1.3 2020/06/15 18:57:39 ad Exp $");
38
39 #include <sys/param.h>
40 #include <sys/bus.h>
41 #include <sys/device.h>
42 #include <sys/intr.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/extent.h>
46 #include <sys/kmem.h>
47 #include <sys/mutex.h>
48 #include <sys/cpu.h>
49
50 #include <dev/pci/pcireg.h>
51 #include <dev/pci/pcivar.h>
52 #include <dev/pci/pciconf.h>
53
54 #include <dev/acpi/acpivar.h>
55 #include <dev/acpi/acpi_pci.h>
56 #include <dev/acpi/acpi_mcfg.h>
57
58 #include <arm/acpi/acpi_pci_machdep.h>
59
/*
 * Controller (CCSR) register layout. Registers at offsets >= 0xc00 are
 * reached indirectly: a page index is programmed into PAB_CTRL and the
 * register is then accessed through the 0xc00..0xfff window.
 */
#define PAB_CTRL			0x808
#define	 PAB_CTRL_PAGE_SEL		__BITS(18,13)	/* indirect page select */
#define PAB_AXI_AMAP_PEX_WIN_L(x)	(0xba8 + 0x10 * (x))	/* outbound window x, low word */
#define PAB_AXI_AMAP_PEX_WIN_H(x)	(0xbac + 0x10 * (x))	/* outbound window x, high word */
#define INDIRECT_ADDR_BOUNDARY		0xc00	/* first indirectly-addressed offset */

/* Look-up table block; GCR/RRE used by the rev 0x10 ID-read workaround below. */
#define LUT_BASE			0x80000
#define LUT_GCR				0x28
#define	 LUT_GCR_RRE			__BIT(0)

/* Split an indirect register offset into page index and in-page address. */
#define REG_TO_PAGE_INDEX(reg)		(((reg) >> 10) & 0x3ff)
#define REG_TO_PAGE_ADDR(reg)		(((reg) & 0x3ff) | INDIRECT_ADDR_BOUNDARY)

/* Encode bus/device/function into an outbound window target word. */
#define PAB_TARGET_BUS(b)		((b) << 24)
#define PAB_TARGET_DEV(d)		((d) << 19)
#define PAB_TARGET_FUNC(f)		((f) << 16)
76
/*
 * Per-controller softc, attached to the ACPI PCI context as ap_conf_priv.
 */
struct acpi_pci_layerscape_gen4 {
	bus_space_tag_t bst;		/* tag shared by both mappings below */
	bus_space_handle_t bsh;		/* controller (CCSR) register space */
	bus_space_handle_t win_bsh;	/* config space window (from MCFG) */
	uint8_t rev;			/* controller revision, gates quirks */
	kmutex_t lock;			/* serializes config space accesses */
};
84
85 static void
86 acpi_pci_layerscape_gen4_ccsr_setpage(struct acpi_pci_layerscape_gen4 *pcie, u_int page_index)
87 {
88 uint32_t val;
89
90 val = bus_space_read_4(pcie->bst, pcie->bsh, PAB_CTRL);
91 val &= ~PAB_CTRL_PAGE_SEL;
92 val |= __SHIFTIN(page_index, PAB_CTRL_PAGE_SEL);
93 bus_space_write_4(pcie->bst, pcie->bsh, PAB_CTRL, val);
94 }
95
96 static uint32_t
97 acpi_pci_layerscape_gen4_ccsr_read4(struct acpi_pci_layerscape_gen4 *pcie, bus_size_t reg)
98 {
99 const bool indirect = reg >= INDIRECT_ADDR_BOUNDARY;
100 const u_int page_index = indirect ? REG_TO_PAGE_INDEX(reg) : 0;
101 const bus_size_t page_addr = indirect ? REG_TO_PAGE_ADDR(reg) : reg;
102
103 acpi_pci_layerscape_gen4_ccsr_setpage(pcie, page_index);
104 return bus_space_read_4(pcie->bst, pcie->bsh, page_addr);
105 }
106
107 static void
108 acpi_pci_layerscape_gen4_ccsr_write4(struct acpi_pci_layerscape_gen4 *pcie,
109 bus_size_t reg, pcireg_t data)
110 {
111 const bool indirect = reg >= INDIRECT_ADDR_BOUNDARY;
112 const u_int page_index = indirect ? REG_TO_PAGE_INDEX(reg) : 0;
113 const bus_size_t page_addr = indirect ? REG_TO_PAGE_ADDR(reg) : reg;
114
115 acpi_pci_layerscape_gen4_ccsr_setpage(pcie, page_index);
116 bus_space_write_4(pcie->bst, pcie->bsh, page_addr, data);
117 }
118
119 static void
120 acpi_pci_layerscape_gen4_select_target(struct acpi_pci_layerscape_gen4 *pcie,
121 pci_chipset_tag_t pc, pcitag_t tag)
122 {
123 int b, d, f;
124
125 pci_decompose_tag(pc, tag, &b, &d, &f);
126
127 const uint32_t target = PAB_TARGET_BUS(b) |
128 PAB_TARGET_DEV(d) | PAB_TARGET_FUNC(f);
129
130 acpi_pci_layerscape_gen4_ccsr_write4(pcie, PAB_AXI_AMAP_PEX_WIN_L(0), target);
131 acpi_pci_layerscape_gen4_ccsr_write4(pcie, PAB_AXI_AMAP_PEX_WIN_H(0), 0);
132 }
133
134 static bool
135 acpi_pci_layerscape_gen4_is_tag_okay(pci_chipset_tag_t pc, pcitag_t tag, int reg)
136 {
137 struct acpi_pci_context *ap = pc->pc_conf_v;
138 int b, d, f;
139
140 pci_decompose_tag(pc, tag, &b, &d, &f);
141
142 if (b <= ap->ap_bus + 1 && d > 0)
143 return false;
144
145 if (b != ap->ap_bus)
146 return acpimcfg_conf_valid(pc, tag, reg);
147
148 return true;
149 }
150
151 static int
152 acpi_pci_layerscape_gen4_conf_read(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t *data)
153 {
154 struct acpi_pci_context *ap = pc->pc_conf_v;
155 struct acpi_pci_layerscape_gen4 *pcie = ap->ap_conf_priv;
156 int b, d, f;
157
158 pci_decompose_tag(pc, tag, &b, &d, &f);
159
160 if (!acpi_pci_layerscape_gen4_is_tag_okay(pc, tag, reg)) {
161 *data = -1;
162 return EINVAL;
163 }
164
165 mutex_enter(&pcie->lock);
166
167 if (pcie->rev == 0x10 && reg == PCI_ID_REG)
168 bus_space_write_4(pcie->bst, pcie->bsh, LUT_BASE + LUT_GCR, 0);
169
170 if (b == ap->ap_bus) {
171 *data = acpi_pci_layerscape_gen4_ccsr_read4(pcie, reg);
172 } else {
173 acpi_pci_layerscape_gen4_select_target(pcie, pc, tag);
174 *data = bus_space_read_4(pcie->bst, pcie->win_bsh, reg);
175 }
176
177 if (pcie->rev == 0x10 && reg == PCI_ID_REG)
178 bus_space_write_4(pcie->bst, pcie->bsh, LUT_BASE + LUT_GCR, LUT_GCR_RRE);
179
180 mutex_exit(&pcie->lock);
181
182 return 0;
183 }
184
185 static int
186 acpi_pci_layerscape_gen4_conf_write(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t data)
187 {
188 struct acpi_pci_context *ap = pc->pc_conf_v;
189 struct acpi_pci_layerscape_gen4 *pcie = ap->ap_conf_priv;
190 int b, d, f;
191
192 pci_decompose_tag(pc, tag, &b, &d, &f);
193
194 if (!acpi_pci_layerscape_gen4_is_tag_okay(pc, tag, reg))
195 return EINVAL;
196
197 mutex_enter(&pcie->lock);
198
199 if (b == ap->ap_bus) {
200 acpi_pci_layerscape_gen4_ccsr_write4(pcie, reg, data);
201 } else {
202 acpi_pci_layerscape_gen4_select_target(pcie, pc, tag);
203 bus_space_write_4(pcie->bst, pcie->win_bsh, reg, data);
204 }
205
206 mutex_exit(&pcie->lock);
207
208 return 0;
209 }
210
211 static UINT64
212 acpi_pci_layerscape_win_base(ACPI_INTEGER seg)
213 {
214 ACPI_TABLE_MCFG *mcfg;
215 ACPI_MCFG_ALLOCATION *ama;
216 ACPI_STATUS rv;
217 uint32_t off;
218 int i;
219
220 rv = AcpiGetTable(ACPI_SIG_MCFG, 0, (ACPI_TABLE_HEADER **)&mcfg);
221 if (ACPI_FAILURE(rv))
222 return 0;
223
224 off = sizeof(ACPI_TABLE_MCFG);
225 ama = ACPI_ADD_PTR(ACPI_MCFG_ALLOCATION, mcfg, off);
226 for (i = 0; off + sizeof(ACPI_MCFG_ALLOCATION) <= mcfg->Header.Length; i++) {
227 if (ama->PciSegment == seg)
228 return ama->Address;
229 off += sizeof(ACPI_MCFG_ALLOCATION);
230 ama = ACPI_ADD_PTR(ACPI_MCFG_ALLOCATION, mcfg, off);
231 }
232
233 return 0; /* not found */
234 }
235
236 static ACPI_STATUS
237 acpi_pci_layerscape_gen4_map(ACPI_HANDLE handle, UINT32 level, void *ctx, void **retval)
238 {
239 struct acpi_pci_context *ap = ctx;
240 struct acpi_resources res;
241 struct acpi_mem *mem;
242 struct acpi_pci_layerscape_gen4 *pcie;
243 bus_space_handle_t bsh;
244 ACPI_HANDLE parent;
245 ACPI_INTEGER seg;
246 ACPI_STATUS rv;
247 UINT64 win_base;
248 int error;
249
250 rv = AcpiGetParent(handle, &parent);
251 if (ACPI_FAILURE(rv))
252 return rv;
253 rv = acpi_eval_integer(parent, "_SEG", &seg);
254 if (ACPI_FAILURE(rv))
255 seg = 0;
256 if (ap->ap_seg != seg)
257 return AE_OK;
258
259 rv = acpi_resource_parse(ap->ap_dev, handle, "_CRS", &res, &acpi_resource_parse_ops_quiet);
260 if (ACPI_FAILURE(rv))
261 return rv;
262
263 mem = acpi_res_mem(&res, 0);
264 if (mem == NULL) {
265 acpi_resource_cleanup(&res);
266 return AE_NOT_FOUND;
267 }
268
269 win_base = acpi_pci_layerscape_win_base(seg);
270 if (win_base == 0) {
271 aprint_error_dev(ap->ap_dev, "couldn't find MCFG entry for segment %ld\n", seg);
272 return AE_NOT_FOUND;
273 }
274
275 error = bus_space_map(ap->ap_bst, mem->ar_base, mem->ar_length,
276 _ARM_BUS_SPACE_MAP_STRONGLY_ORDERED, &bsh);
277 if (error != 0)
278 return AE_NO_MEMORY;
279
280 pcie = kmem_alloc(sizeof(*pcie), KM_SLEEP);
281 pcie->bst = ap->ap_bst;
282 pcie->bsh = bsh;
283 mutex_init(&pcie->lock, MUTEX_DEFAULT, IPL_HIGH);
284
285 error = bus_space_map(ap->ap_bst, win_base, PCI_EXTCONF_SIZE,
286 _ARM_BUS_SPACE_MAP_STRONGLY_ORDERED, &pcie->win_bsh);
287 if (error != 0)
288 return AE_NO_MEMORY;
289
290 const pcireg_t cr = bus_space_read_4(pcie->bst, pcie->bsh, PCI_CLASS_REG);
291 pcie->rev = PCI_REVISION(cr);
292
293 ap->ap_conf_read = acpi_pci_layerscape_gen4_conf_read;
294 ap->ap_conf_write = acpi_pci_layerscape_gen4_conf_write;
295 ap->ap_conf_priv = pcie;
296
297 aprint_verbose_dev(ap->ap_dev,
298 "PCIe segment %lu: Layerscape Gen4 rev. %#x found at %#lx-%#lx\n",
299 seg, pcie->rev, mem->ar_base, mem->ar_base + mem->ar_length - 1);
300
301 return AE_CTRL_TERMINATE;
302 }
303
304 void
305 acpi_pci_layerscape_gen4_init(struct acpi_pci_context *ap)
306 {
307 ACPI_STATUS rv;
308
309 rv = AcpiGetDevices(__UNCONST("NXP0016"), acpi_pci_layerscape_gen4_map, ap, NULL);
310 if (ACPI_FAILURE(rv))
311 return;
312 }
313