/* $NetBSD: gic_v2m.c,v 1.9.2.1 2020/12/14 14:37:48 thorpej Exp $ */

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jared McNeill <jmcneill (at) invisible.ca>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gic_v2m.c,v 1.9.2.1 2020/12/14 14:37:48 thorpej Exp $");

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/bitops.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <arm/pic/picvar.h>
#include <arm/cortex/gic_v2m.h>

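/*
 * gic_v2m_msi_addr/gic_v2m_msi_data: compute the doorbell address and
 * message payload a device must write to raise the given SPI.  A standard
 * GICv2m frame exposes a single GIC_MSI_SETSPI register and encodes the
 * SPI number in the message data; Graviton-style frames instead provide
 * one doorbell register per SPI (8 bytes apart, starting at SPI 32) and
 * ignore the data payload.
 */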
static uint64_t
gic_v2m_msi_addr(struct gic_v2m_frame *frame, int spi)
{
	if ((frame->frame_flags & GIC_V2M_FLAG_GRAVITON) != 0)
		return frame->frame_reg + ((spi - 32) << 3);

	return frame->frame_reg + GIC_MSI_SETSPI;
}

static uint32_t
gic_v2m_msi_data(struct gic_v2m_frame *frame, int spi)
{
	if ((frame->frame_flags & GIC_V2M_FLAG_GRAVITON) != 0)
		return 0;

	return spi;
}

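/*
 * Reserve a contiguous block of 'count' SPIs from the frame.  The search
 * advances in steps of 'count', so blocks stay aligned to the request
 * size.  On success, each claimed slot records a copy of the device's
 * pci_attach_args and the first SPI of the block is returned; -1 means
 * no block was available.
 */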
static int
gic_v2m_msi_alloc_spi(struct gic_v2m_frame *frame, int count,
    const struct pci_attach_args *pa)
{
	struct pci_attach_args *new_pa;
	int spi, n;

	for (spi = frame->frame_base;
	     spi < frame->frame_base + frame->frame_count; ) {
		if (frame->frame_pa[spi] == NULL) {
			for (n = 1; n < count; n++)
				if (frame->frame_pa[spi + n] != NULL)
					goto next_spi;

			for (n = 0; n < count; n++) {
				new_pa = kmem_alloc(sizeof(*new_pa), KM_SLEEP);
				memcpy(new_pa, pa, sizeof(*new_pa));
				frame->frame_pa[spi + n] = new_pa;
			}

			return spi;
		}
next_spi:
		spi += count;
	}

	return -1;
}

static void
gic_v2m_msi_free_spi(struct gic_v2m_frame *frame, int spi)
{
	struct pci_attach_args *pa;

	pa = frame->frame_pa[spi];
	frame->frame_pa[spi] = NULL;

	if (pa != NULL)
		kmem_free(pa, sizeof(*pa));
}

static int
gic_v2m_msi_available_spi(struct gic_v2m_frame *frame)
{
	int spi, n;

	for (spi = frame->frame_base, n = 0;
	     spi < frame->frame_base + frame->frame_count;
	     spi++) {
		if (frame->frame_pa[spi] == NULL)
			n++;
	}

	return n;
}

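/*
 * Program the device's MSI capability to target this frame: disable MSI,
 * set Multiple Message Enable to log2(count), write the doorbell address
 * (using the 64-bit register layout when the device supports it) and the
 * message data, then re-enable MSI.
 */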
static void
gic_v2m_msi_enable(struct gic_v2m_frame *frame, int spi, int count)
{
	const struct pci_attach_args *pa = frame->frame_pa[spi];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL))
		panic("gic_v2m_msi_enable: device is not MSI-capable");

	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	ctl &= ~PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);

	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	ctl &= ~PCI_MSI_CTL_MME_MASK;
	ctl |= __SHIFTIN(ilog2(count), PCI_MSI_CTL_MME_MASK);
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);

	const uint64_t addr = gic_v2m_msi_addr(frame, spi);
	const uint32_t data = gic_v2m_msi_data(frame, spi);

	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	if (ctl & PCI_MSI_CTL_64BIT_ADDR) {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_LO,
		    addr & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR64_HI,
		    (addr >> 32) & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA64, data);
	} else {
		pci_conf_write(pc, tag, off + PCI_MSI_MADDR,
		    addr & 0xffffffff);
		pci_conf_write(pc, tag, off + PCI_MSI_MDATA, data);
	}
	ctl |= PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}

static void
gic_v2m_msi_disable(struct gic_v2m_frame *frame, int spi)
{
	const struct pci_attach_args *pa = frame->frame_pa[spi];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSI, &off, NULL))
		panic("gic_v2m_msi_disable: device is not MSI-capable");

	ctl = pci_conf_read(pc, tag, off + PCI_MSI_CTL);
	ctl &= ~PCI_MSI_CTL_MSI_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSI_CTL, ctl);
}

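/*
 * Program one MSI-X table entry (already mapped by the caller via bst/bsh)
 * with the frame's doorbell address and data, and clear the per-vector
 * mask bit.  MSI-X is disabled in the capability register around the
 * update and re-enabled afterwards.
 */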
static void
gic_v2m_msix_enable(struct gic_v2m_frame *frame, int spi, int msix_vec,
    bus_space_tag_t bst, bus_space_handle_t bsh)
{
	const struct pci_attach_args *pa = frame->frame_pa[spi];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	uint32_t val;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL))
		panic("gic_v2m_msix_enable: device is not MSI-X-capable");

	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl &= ~PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);

	const uint64_t addr = gic_v2m_msi_addr(frame, spi);
	const uint32_t data = gic_v2m_msi_data(frame, spi);
	const uint64_t entry_base = PCI_MSIX_TABLE_ENTRY_SIZE * msix_vec;
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_LO,
	    (uint32_t)addr);
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_ADDR_HI,
	    (uint32_t)(addr >> 32));
	bus_space_write_4(bst, bsh, entry_base + PCI_MSIX_TABLE_ENTRY_DATA,
	    data);
	val = bus_space_read_4(bst, bsh,
	    entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL);
	val &= ~PCI_MSIX_VECTCTL_MASK;
	bus_space_write_4(bst, bsh,
	    entry_base + PCI_MSIX_TABLE_ENTRY_VECTCTL, val);

	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl |= PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
}

static void
gic_v2m_msix_disable(struct gic_v2m_frame *frame, int spi)
{
	const struct pci_attach_args *pa = frame->frame_pa[spi];
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t ctl;
	int off;

	if (!pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL))
		panic("gic_v2m_msix_disable: device is not MSI-X-capable");

	ctl = pci_conf_read(pc, tag, off + PCI_MSIX_CTL);
	ctl &= ~PCI_MSIX_CTL_ENABLE;
	pci_conf_write(pc, tag, off + PCI_MSIX_CTL, ctl);
}

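/*
 * arm_pci_msi msi_alloc hook.  Halve the requested count until it fits
 * the number of free SPIs (or fail if an exact count was demanded and
 * cannot be met), reserve a contiguous SPI block, encode each vector's
 * SPI, vector index and frame id into a pci_intr_handle_t, and program
 * the device's MSI capability.
 */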
static pci_intr_handle_t *
gic_v2m_msi_alloc(struct arm_pci_msi *msi, int *count,
    const struct pci_attach_args *pa, bool exact)
{
	struct gic_v2m_frame * const frame = msi->msi_priv;
	pci_intr_handle_t *vectors;
	int n, off;

	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI, &off, NULL))
		return NULL;

	const int avail = gic_v2m_msi_available_spi(frame);
	if (exact && *count > avail)
		return NULL;

	while (*count > avail) {
		if (avail < *count)
			(*count) >>= 1;
	}
	if (*count == 0)
		return NULL;

	const int spi_base = gic_v2m_msi_alloc_spi(frame, *count, pa);
	if (spi_base == -1)
		return NULL;

	vectors = kmem_alloc(sizeof(*vectors) * *count, KM_SLEEP);
	for (n = 0; n < *count; n++) {
		const int spi = spi_base + n;
		vectors[n] = ARM_PCI_INTR_MSI |
		    __SHIFTIN(spi, ARM_PCI_INTR_IRQ) |
		    __SHIFTIN(n, ARM_PCI_INTR_MSI_VEC) |
		    __SHIFTIN(msi->msi_id, ARM_PCI_INTR_FRAME);
	}

	gic_v2m_msi_enable(frame, spi_base, *count);

	return vectors;
}

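/*
 * arm_pci_msi msix_alloc hook.  Locate and temporarily map the device's
 * MSI-X table from the BAR named in the capability, reserve a block of
 * SPIs, program one table entry per vector, and return handles encoding
 * each vector's SPI, table index and frame id.
 */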
static pci_intr_handle_t *
gic_v2m_msix_alloc(struct arm_pci_msi *msi, u_int *table_indexes, int *count,
    const struct pci_attach_args *pa, bool exact)
{
	struct gic_v2m_frame * const frame = msi->msi_priv;
	pci_intr_handle_t *vectors;
	bus_space_tag_t bst;
	bus_space_handle_t bsh;
	bus_size_t bsz;
	uint32_t table_offset, table_size;
	int n, off, bar, error;
	pcireg_t tbl;

	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &off, NULL))
		return NULL;

	const int avail = gic_v2m_msi_available_spi(frame);
	if (exact && *count > avail)
		return NULL;

	while (*count > avail) {
		if (avail < *count)
			(*count) >>= 1;
	}
	if (*count == 0)
		return NULL;

	tbl = pci_conf_read(pa->pa_pc, pa->pa_tag, off + PCI_MSIX_TBLOFFSET);
	bar = PCI_BAR0 + (4 * (tbl & PCI_MSIX_TBLBIR_MASK));
	table_offset = tbl & PCI_MSIX_TBLOFFSET_MASK;
	table_size = pci_msix_count(pa->pa_pc, pa->pa_tag) *
	    PCI_MSIX_TABLE_ENTRY_SIZE;
	if (table_size == 0)
		return NULL;

	error = pci_mapreg_submap(pa, bar,
	    pci_mapreg_type(pa->pa_pc, pa->pa_tag, bar), BUS_SPACE_MAP_LINEAR,
	    roundup(table_size, PAGE_SIZE), table_offset,
	    &bst, &bsh, NULL, &bsz);
	if (error)
		return NULL;

	const int spi_base = gic_v2m_msi_alloc_spi(frame, *count, pa);
	if (spi_base == -1) {
		bus_space_unmap(bst, bsh, bsz);
		return NULL;
	}

	vectors = kmem_alloc(sizeof(*vectors) * *count, KM_SLEEP);
	for (n = 0; n < *count; n++) {
		const int spi = spi_base + n;
		const int msix_vec = table_indexes ? table_indexes[n] : n;
		vectors[msix_vec] = ARM_PCI_INTR_MSIX |
		    __SHIFTIN(spi, ARM_PCI_INTR_IRQ) |
		    __SHIFTIN(msix_vec, ARM_PCI_INTR_MSI_VEC) |
		    __SHIFTIN(msi->msi_id, ARM_PCI_INTR_FRAME);

		gic_v2m_msix_enable(frame, spi, msix_vec, bst, bsh);
	}

	bus_space_unmap(bst, bsh, bsz);

	return vectors;
}

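/*
 * Establish the handler for one previously allocated vector: decode the
 * SPI from the handle and register an edge-triggered interrupt on the
 * frame's parent PIC.
 */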
static void *
gic_v2m_msi_intr_establish(struct arm_pci_msi *msi,
    pci_intr_handle_t ih, int ipl, int (*func)(void *), void *arg,
    const char *xname)
{
	struct gic_v2m_frame * const frame = msi->msi_priv;

	const int spi = __SHIFTOUT(ih, ARM_PCI_INTR_IRQ);
	const int mpsafe = (ih & ARM_PCI_INTR_MPSAFE) ? IST_MPSAFE : 0;

	return pic_establish_intr(frame->frame_pic, spi, ipl,
	    IST_EDGE | mpsafe, func, arg, xname);
}

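/*
 * Release previously allocated vectors: disable MSI or MSI-X at the
 * device, return each SPI to the frame's pool, and disestablish any
 * handler still attached to it.
 */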
static void
gic_v2m_msi_intr_release(struct arm_pci_msi *msi, pci_intr_handle_t *pih,
    int count)
{
	struct gic_v2m_frame * const frame = msi->msi_priv;
	int n;

	for (n = 0; n < count; n++) {
		const int spi = __SHIFTOUT(pih[n], ARM_PCI_INTR_IRQ);
		if (pih[n] & ARM_PCI_INTR_MSIX)
			gic_v2m_msix_disable(frame, spi);
		if (pih[n] & ARM_PCI_INTR_MSI)
			gic_v2m_msi_disable(frame, spi);
		gic_v2m_msi_free_spi(frame, spi);
		struct intrsource * const is =
		    frame->frame_pic->pic_sources[spi];
		if (is != NULL)
			pic_disestablish_source(is);
	}
}

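/*
 * Register a v2m frame with the PCI MSI machinery.  The caller fills in
 * frame_reg, frame_pic, frame_base, frame_count and frame_flags before
 * calling; this routine only wires up the arm_pci_msi callbacks and hands
 * the frame to arm_pci_msi_add().
 */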
int
gic_v2m_init(struct gic_v2m_frame *frame, device_t dev, uint32_t frame_id)
{
	struct arm_pci_msi *msi = &frame->frame_msi;

	msi->msi_dev = dev;
	msi->msi_priv = frame;
	msi->msi_alloc = gic_v2m_msi_alloc;
	msi->msix_alloc = gic_v2m_msix_alloc;
	msi->msi_intr_establish = gic_v2m_msi_intr_establish;
	msi->msi_intr_release = gic_v2m_msi_intr_release;

	return arm_pci_msi_add(msi);
}

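/*
 * Example: a minimal, hypothetical sketch of how a bus front end that has
 * discovered a v2m frame might register it with this module.  The names
 * frame_base_pa, gic_pic and self, and the SPI base/count values, are
 * placeholders for illustration only; real callers take them from the
 * frame's registers or from firmware tables.
 *
 *	struct gic_v2m_frame *frame;
 *
 *	frame = kmem_zalloc(sizeof(*frame), KM_SLEEP);
 *	frame->frame_reg = frame_base_pa;
 *	frame->frame_pic = gic_pic;
 *	frame->frame_base = 96;
 *	frame->frame_count = 32;
 *	if (gic_v2m_init(frame, self, 0) != 0)
 *		aprint_error_dev(self, "couldn't register MSI frame\n");
 */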