pci_machdep.c revision 1.34 1 /* $NetBSD: pci_machdep.c,v 1.34 2025/10/04 03:26:40 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 2020 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1995, 1996 Carnegie-Mellon University.
34 * All rights reserved.
35 *
36 * Author: Chris G. Demetriou
37 *
38 * Permission to use, copy, modify and distribute this software and
39 * its documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
43 *
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
46 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * Carnegie Mellon requests users of this software to return to
49 *
50 * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
54 *
55 * any improvements or extensions that they make and grant Carnegie the
56 * rights to redistribute these changes.
57 */
58
59 /*
60 * Machine-specific functions for PCI autoconfiguration.
61 */
62
63 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
64
65 __KERNEL_RCSID(0, "$NetBSD: pci_machdep.c,v 1.34 2025/10/04 03:26:40 thorpej Exp $");
66
67 #include <sys/types.h>
68 #include <sys/param.h>
69 #include <sys/time.h>
70 #include <sys/systm.h>
71 #include <sys/errno.h>
72 #include <sys/device.h>
73 #include <sys/cpu.h>
74
75 #include <dev/isa/isavar.h>
76 #include <dev/pci/pcireg.h>
77 #include <dev/pci/pcivar.h>
78 #include <dev/pci/pcidevs.h>
79
80 #include "vga_pci.h"
81 #if NVGA_PCI
82 #include <dev/ic/mc6845reg.h>
83 #include <dev/ic/pcdisplayvar.h>
84 #include <dev/pci/vga_pcivar.h>
85 #endif
86
87 #include "tga.h"
88 #if NTGA
89 #include <dev/pci/tgavar.h>
90 #endif
91
92 #include <machine/rpb.h>
93
/*
 * pci_display_console:
 *
 *	Attach the PCI display device at bus/device/function as the
 *	system console.  Panics if no device responds at that address,
 *	or if no configured display driver (vga_pci, tga) claims it.
 */
void
pci_display_console(bus_space_tag_t iot, bus_space_tag_t memt, pci_chipset_tag_t pc, int bus, int device, int function)
{
#if NVGA_PCI || NTGA
	pcitag_t tag;
	pcireg_t id;
	int match, nmatch;
#endif
#if NVGA_PCI
	pcireg_t class;
#endif
	int (*fn)(bus_space_tag_t, bus_space_tag_t, pci_chipset_tag_t,
	    int, int, int);

#if NVGA_PCI || NTGA
	tag = pci_make_tag(pc, bus, device, function);
	id = pci_conf_read(pc, tag, PCI_ID_REG);
	/* An ID register of all-0s or all-1s means nothing answered. */
	if (id == 0 || id == 0xffffffff)
		panic("pci_display_console: no device at %d/%d/%d",
		    bus, device, function);
# if NVGA_PCI
	class = pci_conf_read(pc, tag, PCI_CLASS_REG);
# endif

	match = 0;
#endif
	fn = NULL;

	/* Best match quality wins; each driver reports its own score. */
#if NVGA_PCI
	nmatch = DEVICE_IS_VGA_PCI(class, id);
	if (nmatch > match) {
		match = nmatch;
		fn = vga_pci_cnattach;
	}
#endif
#if NTGA
	nmatch = DEVICE_IS_TGA(class, id);
	if (nmatch > match)
		/* Let the TGA driver confirm it can drive this device. */
		nmatch = tga_cnmatch(iot, memt, pc, tag);
	if (nmatch > match) {
		match = nmatch;
		fn = tga_cnattach;
	}
#endif

	if (fn != NULL)
		(*fn)(iot, memt, pc, bus, device, function);
	else
		panic("pci_display_console: unconfigured device at %d/%d/%d",
		    bus, device, function);
}
145
146 void
147 device_pci_register(device_t dev, void *aux)
148 {
149 struct pci_attach_args *pa = aux;
150 struct ctb *ctb;
151
152 /* set properties for PCI framebuffers */
153 ctb = (struct ctb *)(((char *)hwrpb) + hwrpb->rpb_ctb_off);
154 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY &&
155 ctb->ctb_term_type == CTB_GRAPHICS) {
156 /* XXX should consider multiple displays? */
157 device_setprop_bool(dev, "is_console", true);
158 }
159 }
160
/*
 * alpha_pci_intr_init:
 *
 *	Dispatch to the platform-specific PCI interrupt back-end.
 *	Each chipset registers an alpha_pci_intr_impl entry in the
 *	"alpha_pci_intr_impls" link set; the one whose systype matches
 *	the running system gets to initialize.  Panics if no back-end
 *	claims this system type.
 */
void
alpha_pci_intr_init(void *core, bus_space_tag_t iot, bus_space_tag_t memt,
    pci_chipset_tag_t pc)
{
	__link_set_decl(alpha_pci_intr_impls, struct alpha_pci_intr_impl);
	struct alpha_pci_intr_impl * const *impl;

	__link_set_foreach(impl, alpha_pci_intr_impls) {
		if ((*impl)->systype == cputype) {
			(*impl)->intr_init(core, iot, memt, pc);
			return;
		}
	}
	panic("%s: unknown systype %d", __func__, cputype);
}
176
177 void
178 alpha_pci_intr_alloc(pci_chipset_tag_t pc, unsigned int maxstrays)
179 {
180 unsigned int i;
181 struct evcnt *ev;
182 const char *cp;
183
184 pc->pc_shared_intrs = alpha_shared_intr_alloc(pc->pc_nirq);
185
186 for (i = 0; i < pc->pc_nirq; i++) {
187 alpha_shared_intr_set_maxstrays(pc->pc_shared_intrs, i,
188 maxstrays);
189 alpha_shared_intr_set_private(pc->pc_shared_intrs, i,
190 pc->pc_intr_v);
191
192 ev = alpha_shared_intr_evcnt(pc->pc_shared_intrs, i);
193 cp = alpha_shared_intr_string(pc->pc_shared_intrs, i);
194
195 evcnt_attach_dynamic(ev, EVCNT_TYPE_INTR, NULL,
196 pc->pc_intr_desc, cp);
197 }
198 }
199
/*
 * alpha_pci_generic_intr_map:
 *
 *	Generic PCI interrupt mapping for chipsets whose console
 *	firmware leaves the routing decision in the interrupt "line"
 *	register.  Returns 0 and initializes *ihp on success, non-zero
 *	if the device has no usable interrupt mapping.
 */
int
alpha_pci_generic_intr_map(const struct pci_attach_args * const pa,
    pci_intr_handle_t * const ihp)
{
	pcitag_t const bustag = pa->pa_intrtag;
	int const buspin = pa->pa_intrpin;
	int const line = pa->pa_intrline;
	pci_chipset_tag_t const pc = pa->pa_pc;
	int bus, device, function;

	if (buspin == 0) {
		/* No IRQ used. */
		return 1;
	}
	if (buspin < 0 || buspin > 4) {
		/* Valid pins are INTA# (1) through INTD# (4). */
		printf("%s: bad interrupt pin %d\n", __func__, buspin);
		return 1;
	}

	/* Decomposed only for the diagnostic messages below. */
	pci_decompose_tag(pc, bustag, &bus, &device, &function);

	/*
	 * The console firmware places the interrupt mapping in the "line"
	 * value.  A value of (char)-1 indicates there is no mapping.
	 */
	if (line == 0xff) {
		printf("%s: no mapping for %d/%d/%d\n", __func__,
		    bus, device, function);
		return 1;
	}

	if (line < 0 || line >= pc->pc_nirq) {
		printf("%s: bad line %d for %d/%d/%d\n", __func__,
		    line, bus, device, function);
		return 1;
	}

	alpha_pci_intr_handle_init(ihp, line, 0);
	return 0;
}
240
241 const char *
242 alpha_pci_generic_intr_string(pci_chipset_tag_t const pc,
243 pci_intr_handle_t const ih, char * const buf, size_t const len)
244 {
245 const u_int irq = alpha_pci_intr_handle_get_irq(&ih);
246
247 KASSERT(irq < pc->pc_nirq);
248
249 snprintf(buf, len, "%s irq %u", pc->pc_intr_desc, irq);
250 return buf;
251 }
252
253 const struct evcnt *
254 alpha_pci_generic_intr_evcnt(pci_chipset_tag_t const pc,
255 pci_intr_handle_t const ih)
256 {
257 const u_int irq = alpha_pci_intr_handle_get_irq(&ih);
258
259 KASSERT(irq < pc->pc_nirq);
260
261 return alpha_shared_intr_evcnt(pc->pc_shared_intrs, irq);
262 }
263
/*
 * alpha_pci_generic_intr_select_cpu:
 *
 *	Select the CPU that should service the given IRQ.  Preference:
 *
 *	1. The primary CPU, if the back-end cannot route interrupts
 *	   (pc_eligible_cpus == 0).
 *	2. The CPU already assigned to this IRQ, unless it has since
 *	   been shielded from interrupts (SPCF_NOINTR) and is not the
 *	   primary (the primary always keeps its interrupts).
 *	3. The eligible, unshielded CPU with the fewest handlers.
 *	4. The primary CPU as a last resort.
 *
 *	Called with cpu_lock held.
 */
static struct cpu_info *
alpha_pci_generic_intr_select_cpu(pci_chipset_tag_t const pc, u_int const irq,
    u_int const flags)
{
	struct cpu_info *ci, *best_ci;
	CPU_INFO_ITERATOR cii;

	KASSERT(mutex_owned(&cpu_lock));

	/*
	 * If the back-end didn't tell us where we can route, then
	 * they all go to the primary CPU.
	 */
	if (pc->pc_eligible_cpus == 0) {
		return &cpu_info_primary;
	}

	/*
	 * If the interrupt already has a CPU assigned, keep on using it,
	 * unless the CPU has become ineligible.
	 */
	ci = alpha_shared_intr_get_cpu(pc->pc_shared_intrs, irq);
	if (ci != NULL) {
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0 ||
		    CPU_IS_PRIMARY(ci)) {
			return ci;
		}
	}

	/*
	 * Pick the CPU with the fewest handlers.
	 */
	best_ci = NULL;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if ((pc->pc_eligible_cpus & __BIT(ci->ci_cpuid)) == 0) {
			/* This CPU is not eligible in hardware. */
			continue;
		}
		if (ci->ci_schedstate.spc_flags & SPCF_NOINTR) {
			/* This CPU is not eligible in software. */
			continue;
		}
		if (best_ci == NULL ||
		    ci->ci_nintrhand < best_ci->ci_nintrhand) {
			best_ci = ci;
		}
	}

	/* If we found one, cool... */
	if (best_ci != NULL) {
		return best_ci;
	}

	/* ...if not, well I guess we'll just fall back on the primary. */
	return &cpu_info_primary;
}
320
/*
 * alpha_pci_generic_intr_establish:
 *
 *	Generic interrupt-establish routine for PCI chipsets that use
 *	the alpha shared-interrupt framework.  Allocates a handler for
 *	the IRQ encoded in the handle, assigns the IRQ to a CPU
 *	(programming the hardware route if the back-end supports it),
 *	and on the first handler hooks the SCB dispatch vector and
 *	enables the IRQ.
 *
 *	Returns an opaque cookie for
 *	alpha_pci_generic_intr_disestablish(), or NULL if the handler
 *	could not be allocated or linked.
 */
void *
alpha_pci_generic_intr_establish(pci_chipset_tag_t const pc,
    pci_intr_handle_t const ih, int const level,
    int (*func)(void *), void *arg)
{
	const u_int irq = alpha_pci_intr_handle_get_irq(&ih);
	const u_int flags = alpha_pci_intr_handle_get_flags(&ih);
	void *cookie;

	KASSERT(irq < pc->pc_nirq);

	cookie = alpha_shared_intr_alloc_intrhand(pc->pc_shared_intrs,
	    irq, IST_LEVEL, level, flags, func, arg, pc->pc_intr_desc);

	if (cookie == NULL)
		return NULL;

	/* cpu_lock serializes CPU selection and route programming. */
	mutex_enter(&cpu_lock);

	struct cpu_info *target_ci =
	    alpha_pci_generic_intr_select_cpu(pc, irq, flags);
	struct cpu_info *current_ci =
	    alpha_shared_intr_get_cpu(pc->pc_shared_intrs, irq);

	const bool first_handler =
	    ! alpha_shared_intr_isactive(pc->pc_shared_intrs, irq);

	/*
	 * If this is the first handler on this interrupt, or if the
	 * target CPU has changed, then program the route if the
	 * hardware supports it.
	 */
	if (first_handler || target_ci != current_ci) {
		alpha_shared_intr_set_cpu(pc->pc_shared_intrs, irq, target_ci);
		if (pc->pc_intr_set_affinity != NULL) {
			pc->pc_intr_set_affinity(pc, irq, target_ci);
		}
	}

	if (! alpha_shared_intr_link(pc->pc_shared_intrs, cookie,
	    pc->pc_intr_desc)) {
		/* Linking failed; undo the handler allocation. */
		mutex_exit(&cpu_lock);
		alpha_shared_intr_free_intrhand(cookie);
		return NULL;
	}

	if (first_handler) {
		/* Hook the dispatch vector, then unmask the IRQ. */
		scb_set(pc->pc_vecbase + SCB_IDXTOVEC(irq),
		    alpha_pci_generic_iointr, pc);
		pc->pc_intr_enable(pc, irq);
	}

	mutex_exit(&cpu_lock);

	return cookie;
}
377
/*
 * alpha_pci_generic_intr_disestablish:
 *
 *	Tear down a handler established by
 *	alpha_pci_generic_intr_establish().  When the IRQ is down to
 *	this one handler, the IRQ is disabled and its SCB vector is
 *	released before the handler is unlinked and freed.
 */
void
alpha_pci_generic_intr_disestablish(pci_chipset_tag_t const pc,
    void * const cookie)
{
	struct alpha_shared_intrhand * const ih = cookie;
	const u_int irq = ih->ih_num;

	mutex_enter(&cpu_lock);

	if (alpha_shared_intr_firstactive(pc->pc_shared_intrs, irq)) {
		/* Last handler: quiesce the IRQ and free its vector. */
		pc->pc_intr_disable(pc, irq);
		alpha_shared_intr_set_dfltsharetype(pc->pc_shared_intrs,
		    irq, IST_NONE);
		scb_free(pc->pc_vecbase + SCB_IDXTOVEC(irq));
	}

	alpha_shared_intr_unlink(pc->pc_shared_intrs, cookie, pc->pc_intr_desc);

	mutex_exit(&cpu_lock);

	alpha_shared_intr_free_intrhand(cookie);
}
400
/*
 * alpha_pci_generic_iointr:
 *
 *	Interrupt dispatcher hooked into the SCB for generic PCI IRQs.
 *	Translates the vector back into an IRQ index and runs the
 *	shared handler chain.  Unclaimed interrupts are counted as
 *	strays, and the IRQ is disabled once the stray limit is hit;
 *	a claimed interrupt resets the stray count.
 */
void
alpha_pci_generic_iointr(void * const arg, unsigned long const vec)
{
	pci_chipset_tag_t const pc = arg;
	const u_int irq = SCB_VECTOIDX(vec - pc->pc_vecbase);

	if (!alpha_shared_intr_dispatch(pc->pc_shared_intrs, irq)) {
		alpha_shared_intr_stray(pc->pc_shared_intrs, irq,
		    pc->pc_intr_desc);
		if (ALPHA_SHARED_INTR_DISABLE(pc->pc_shared_intrs, irq)) {
			pc->pc_intr_disable(pc, irq);
		}
	} else {
		alpha_shared_intr_reset_strays(pc->pc_shared_intrs, irq);
	}
}
417
/*
 * alpha_pci_generic_intr_redistribute:
 *
 *	Move IRQs off CPUs that have been shielded from device
 *	interrupts (SPCF_NOINTR).  Called with cpu_lock held, after
 *	all CPUs are online.  Does nothing if the back-end cannot
 *	change interrupt affinity.
 */
void
alpha_pci_generic_intr_redistribute(pci_chipset_tag_t const pc)
{
	struct cpu_info *current_ci, *new_ci;
	unsigned int irq;

	KASSERT(mutex_owned(&cpu_lock));
	KASSERT(mp_online);

	/* If we can't set affinity, then there's nothing to do. */
	if (pc->pc_eligible_cpus == 0 || pc->pc_intr_set_affinity == NULL) {
		return;
	}

	/*
	 * Look at each IRQ, and allocate a new CPU for each IRQ
	 * that's being serviced by a now-shielded CPU.
	 */
	for (irq = 0; irq < pc->pc_nirq; irq++) {
		current_ci =
		    alpha_shared_intr_get_cpu(pc->pc_shared_intrs, irq);
		if (current_ci == NULL ||
		    (current_ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) {
			/* Unassigned, or its CPU is still taking interrupts. */
			continue;
		}

		new_ci = alpha_pci_generic_intr_select_cpu(pc, irq, 0);
		if (new_ci == current_ci) {
			/* Can't shield this one. */
			continue;
		}

		alpha_shared_intr_set_cpu(pc->pc_shared_intrs, irq, new_ci);
		pc->pc_intr_set_affinity(pc, irq, new_ci);
	}

	/* XXX should now re-balance */
}
456
457 #define ALPHA_PCI_INTR_HANDLE_IRQ __BITS(0,31)
458 #define ALPHA_PCI_INTR_HANDLE_FLAGS __BITS(32,63)
459
460 void
461 alpha_pci_intr_handle_init(pci_intr_handle_t * const ihp, u_int const irq,
462 u_int const flags)
463 {
464 ihp->value = __SHIFTIN(irq, ALPHA_PCI_INTR_HANDLE_IRQ) |
465 __SHIFTIN(flags, ALPHA_PCI_INTR_HANDLE_FLAGS);
466 }
467
468 void
469 alpha_pci_intr_handle_set_irq(pci_intr_handle_t * const ihp, u_int const irq)
470 {
471 ihp->value = (ihp->value & ALPHA_PCI_INTR_HANDLE_FLAGS) |
472 __SHIFTIN(irq, ALPHA_PCI_INTR_HANDLE_IRQ);
473 }
474
475 u_int
476 alpha_pci_intr_handle_get_irq(const pci_intr_handle_t * const ihp)
477 {
478 return __SHIFTOUT(ihp->value, ALPHA_PCI_INTR_HANDLE_IRQ);
479 }
480
481 void
482 alpha_pci_intr_handle_set_flags(pci_intr_handle_t * const ihp,
483 u_int const flags)
484 {
485 ihp->value = (ihp->value & ALPHA_PCI_INTR_HANDLE_IRQ) |
486 __SHIFTIN(flags, ALPHA_PCI_INTR_HANDLE_FLAGS);
487 }
488
489 u_int
490 alpha_pci_intr_handle_get_flags(const pci_intr_handle_t * const ihp)
491 {
492 return __SHIFTOUT(ihp->value, ALPHA_PCI_INTR_HANDLE_FLAGS);
493 }
494
495 /*
496 * MI PCI back-end entry points.
497 */
498
499 void
500 pci_attach_hook(device_t const parent, device_t const self,
501 struct pcibus_attach_args * const pba)
502 {
503 pci_chipset_tag_t const pc = pba->pba_pc;
504
505 if (pc->pc_attach_hook != NULL) {
506 pc->pc_attach_hook(parent, self, pba);
507 }
508 }
509
510 int
511 pci_bus_maxdevs(pci_chipset_tag_t const pc, int const busno)
512 {
513 if (pc->pc_bus_maxdevs == NULL) {
514 return 32;
515 }
516
517 return pc->pc_bus_maxdevs(pc->pc_conf_v, busno);
518 }
519
520 pcitag_t
521 pci_make_tag(pci_chipset_tag_t const pc, int const bus, int const dev,
522 int const func)
523 {
524 if (__predict_true(pc->pc_make_tag == NULL)) {
525 /* Just use the standard Type 1 address format. */
526 return __SHIFTIN(bus, PCI_CONF_TYPE1_BUS) |
527 __SHIFTIN(dev, PCI_CONF_TYPE1_DEVICE) |
528 __SHIFTIN(func, PCI_CONF_TYPE1_FUNCTION);
529 }
530
531 return pc->pc_make_tag(pc->pc_conf_v, bus, dev, func);
532 }
533
534 void
535 pci_decompose_tag(pci_chipset_tag_t const pc, pcitag_t const tag,
536 int * const busp, int * const devp, int * const funcp)
537 {
538 if (__predict_true(pc->pc_decompose_tag == NULL)) {
539 if (busp != NULL)
540 *busp = __SHIFTOUT(tag, PCI_CONF_TYPE1_BUS);
541 if (devp != NULL)
542 *devp = __SHIFTOUT(tag, PCI_CONF_TYPE1_DEVICE);
543 if (funcp != NULL)
544 *funcp = __SHIFTOUT(tag, PCI_CONF_TYPE1_FUNCTION);
545 return;
546 }
547
548 pc->pc_decompose_tag(pc->pc_conf_v, tag, busp, devp, funcp);
549 }
550
551 pcireg_t
552 pci_conf_read(pci_chipset_tag_t const pc, pcitag_t const tag, int const reg)
553 {
554 KASSERT(pc->pc_conf_read != NULL);
555 return pc->pc_conf_read(pc->pc_conf_v, tag, reg);
556 }
557
558 void
559 pci_conf_write(pci_chipset_tag_t const pc, pcitag_t const tag, int const reg,
560 pcireg_t const val)
561 {
562 KASSERT(pc->pc_conf_write != NULL);
563 pc->pc_conf_write(pc->pc_conf_v, tag, reg, val);
564 }
565
566 int
567 pci_intr_map(const struct pci_attach_args * const pa,
568 pci_intr_handle_t * const ihp)
569 {
570 pci_chipset_tag_t const pc = pa->pa_pc;
571
572 KASSERT(pc->pc_intr_map != NULL);
573 return pc->pc_intr_map(pa, ihp);
574 }
575
576 const char *
577 pci_intr_string(pci_chipset_tag_t const pc, pci_intr_handle_t const ih,
578 char * const buf, size_t const len)
579 {
580 KASSERT(pc->pc_intr_string != NULL);
581 return pc->pc_intr_string(pc, ih, buf, len);
582 }
583
584 const struct evcnt *
585 pci_intr_evcnt(pci_chipset_tag_t const pc, pci_intr_handle_t const ih)
586 {
587 KASSERT(pc->pc_intr_evcnt != NULL);
588 return pc->pc_intr_evcnt(pc, ih);
589 }
590
591 void *
592 pci_intr_establish(pci_chipset_tag_t const pc, pci_intr_handle_t const ih,
593 int const ipl, int (*func)(void *), void *arg)
594 {
595 KASSERT(pc->pc_intr_establish != NULL);
596 return pc->pc_intr_establish(pc, ih, ipl, func, arg);
597 }
598
599 void
600 pci_intr_disestablish(pci_chipset_tag_t const pc, void * const cookie)
601 {
602 KASSERT(pc->pc_intr_disestablish != NULL);
603 pc->pc_intr_disestablish(pc, cookie);
604 }
605
606 int
607 pci_intr_setattr(pci_chipset_tag_t const pc __unused,
608 pci_intr_handle_t * const ihp, int const attr, uint64_t const data)
609 {
610 u_int flags = alpha_pci_intr_handle_get_flags(ihp);
611
612 switch (attr) {
613 case PCI_INTR_MPSAFE:
614 if (data)
615 flags |= ALPHA_INTR_MPSAFE;
616 else
617 flags &= ~ALPHA_INTR_MPSAFE;
618 break;
619
620 default:
621 return ENODEV;
622 }
623
624 alpha_pci_intr_handle_set_flags(ihp, flags);
625 return 0;
626 }
627