/*	$NetBSD: astro.c,v 1.2.4.1 2021/03/22 02:00:56 thorpej Exp $	*/
2
3 /* $OpenBSD: astro.c,v 1.8 2007/10/06 23:50:54 krw Exp $ */
4
5 /*
6 * Copyright (c) 2007 Mark Kettenis
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/device.h>
24 #include <sys/extent.h>
25 #include <sys/malloc.h>
26 #include <sys/reboot.h>
27 #include <sys/tree.h>
28
29 #include <uvm/uvm.h>
30
31 #include <machine/iomod.h>
32 #include <machine/autoconf.h>
33 #include <machine/pdc.h>
34 #include <machine/endian.h>
35
36 #include <hppa/dev/cpudevs.h>
37 #include <hppa/hppa/machdep.h>
38
/*
 * Astro memory/I/O controller register layout.  The pad/resv members
 * encode the byte offsets of the registers from the module's HPA (see
 * the hex values in the resv sizes).  Registers are little-endian;
 * accessors byte-swap with le32toh()/htole32()/htole64() at each use.
 */
struct astro_regs {
	uint32_t rid;			/* revision id (read at attach) */
	uint32_t pad0000;
	uint32_t ioc_ctrl;		/* IOC control, ASTRO_IOC_CTRL_* bits */
	uint32_t pad0008;
	uint8_t resv1[0x0300 - 0x0010];
	uint64_t lmmio_direct0_base;	/* low MMIO direct ranges 0-3 */
	uint64_t lmmio_direct0_mask;
	uint64_t lmmio_direct0_route;
	uint64_t lmmio_direct1_base;
	uint64_t lmmio_direct1_mask;
	uint64_t lmmio_direct1_route;
	uint64_t lmmio_direct2_base;
	uint64_t lmmio_direct2_mask;
	uint64_t lmmio_direct2_route;
	uint64_t lmmio_direct3_base;
	uint64_t lmmio_direct3_mask;
	uint64_t lmmio_direct3_route;
	uint64_t lmmio_dist_base;	/* low MMIO distributed range */
	uint64_t lmmio_dist_mask;
	uint64_t lmmio_dist_route;
	uint64_t gmmio_dist_base;	/* global MMIO distributed range */
	uint64_t gmmio_dist_mask;
	uint64_t gmmio_dist_route;
	uint64_t ios_dist_base;		/* I/O space distributed range */
	uint64_t ios_dist_mask;
	uint64_t ios_dist_route;
	uint8_t resv2[0x03c0 - 0x03a8];
	uint64_t ios_direct_base;	/* I/O space direct range */
	uint64_t ios_direct_mask;
	uint64_t ios_direct_route;
	uint8_t resv3[0x22000 - 0x03d8];
	uint64_t func_id;
	uint64_t func_class;
	uint8_t resv4[0x22040 - 0x22010];
	uint64_t rope_config;
	uint8_t resv5[0x22050 - 0x22048];
	uint64_t rope_debug;
	uint8_t resv6[0x22200 - 0x22058];
	uint64_t rope0_control;		/* per-rope (I/O bus link) control */
	uint64_t rope1_control;
	uint64_t rope2_control;
	uint64_t rope3_control;
	uint64_t rope4_control;
	uint64_t rope5_control;
	uint64_t rope6_control;
	uint64_t rope7_control;
	uint8_t resv7[0x22300 - 0x22240];
	uint32_t tlb_ibase;		/* IOVA space base/enable */
	uint32_t pad22300;
	uint32_t tlb_imask;		/* IOVA space size mask */
	uint32_t pad22308;
	uint32_t tlb_pcom;		/* purge command (TLB flush) */
	uint32_t pad22310;
	uint32_t tlb_tcnfg;		/* TLB config (I/O page size) */
	uint32_t pad22318;
	uint64_t tlb_pdir_base;		/* physical address of the I/O pdir */
};
97
98 #define ASTRO_IOC_CTRL_TE 0x0001 /* TOC Enable */
99 #define ASTRO_IOC_CTRL_CE 0x0002 /* Coalesce Enable */
100 #define ASTRO_IOC_CTRL_DE 0x0004 /* Dillon Enable */
101 #define ASTRO_IOC_CTRL_IE 0x0008 /* IOS Enable */
102 #define ASTRO_IOC_CTRL_OS 0x0010 /* Outbound Synchronous */
103 #define ASTRO_IOC_CTRL_IS 0x0020 /* Inbound Synchronous */
104 #define ASTRO_IOC_CTRL_RC 0x0040 /* Read Current Enable */
105 #define ASTRO_IOC_CTRL_L0 0x0080 /* 0-length Read Enable */
106 #define ASTRO_IOC_CTRL_RM 0x0100 /* Real Mode */
107 #define ASTRO_IOC_CTRL_NC 0x0200 /* Non-coherent Mode */
108 #define ASTRO_IOC_CTRL_ID 0x0400 /* Interrupt Disable */
109 #define ASTRO_IOC_CTRL_D4 0x0800 /* Disable 4-byte Coalescing */
110 #define ASTRO_IOC_CTRL_CC 0x1000 /* Increase Coalescing counter value */
111 #define ASTRO_IOC_CTRL_DD 0x2000 /* Disable distr. range coalescing */
112 #define ASTRO_IOC_CTRL_DC 0x4000 /* Disable the coalescing counter */
113
114 #define IOTTE_V 0x8000000000000000LL /* Entry valid */
115 #define IOTTE_PAMASK 0x000000fffffff000LL
116 #define IOTTE_CI 0x00000000000000ffLL /* Coherent index */
117
/*
 * Per-instance software state for the Astro controller.
 */
struct astro_softc {
	device_t sc_dv;			/* generic device glue */

	bus_dma_tag_t sc_dmat;		/* parent DMA tag we defer to */
	struct astro_regs volatile *sc_regs;	/* mapped chip registers */
	uint64_t *sc_pdir;		/* I/O pagetable (IOTTE array), KVA */

	char sc_dvmamapname[20];	/* name passed to extent_create() */
	struct extent *sc_dvmamap;	/* allocator for DVMA address space */
	struct hppa_bus_dma_tag sc_dmatag;	/* DMA tag handed to children */
};
129
130 /*
131 * per-map DVMA page table
132 */
/* One page mapped through the IOMMU for a particular DMA map. */
struct iommu_page_entry {
	SPLAY_ENTRY(iommu_page_entry) ipe_node;	/* tree linkage, keyed on ipe_pa */
	paddr_t ipe_pa;		/* physical address of the page */
	vaddr_t ipe_va;		/* kernel VA (used for the coherence index) */
	bus_addr_t ipe_dva;	/* assigned DVMA address; 0 until loaded */
};
139
struct iommu_page_map {
	SPLAY_HEAD(iommu_page_tree, iommu_page_entry) ipm_tree;
	int ipm_maxpage;	/* Size of allocated page map */
	int ipm_pagecnt;	/* Number of entries in use */
	/*
	 * Variable-length trailing array; iommu_iomap_create() allocates
	 * sizeof(struct iommu_map_state) + (n - 1) extra entries, so the
	 * declared size of 1 must not be changed without adjusting it.
	 */
	struct iommu_page_entry ipm_map[1];
};
146
147 /*
148 * per-map IOMMU state
149 */
/*
 * per-map IOMMU state, hung off bus_dmamap's _dm_cookie.
 */
struct iommu_map_state {
	struct astro_softc *ims_sc;	/* owning controller */
	bus_addr_t ims_dvmastart;	/* start of DVMA range from extent */
	bus_size_t ims_dvmasize;	/* size of that range */
	struct iommu_page_map ims_map;	/* map must be last (array at end) */
};
156
157 int astro_match(device_t, cfdata_t, void *);
158 void astro_attach(device_t, device_t, void *);
159 static device_t astro_callback(device_t self, struct confargs *ca);
160
161 CFATTACH_DECL_NEW(astro, sizeof(struct astro_softc),
162 astro_match, astro_attach, NULL, NULL);
163
164 extern struct cfdriver astro_cd;
165
166 int iommu_dvmamap_create(void *, bus_size_t, int, bus_size_t, bus_size_t,
167 int, bus_dmamap_t *);
168 void iommu_dvmamap_destroy(void *, bus_dmamap_t);
169 int iommu_dvmamap_load(void *, bus_dmamap_t, void *, bus_size_t,
170 struct proc *, int);
171 int iommu_dvmamap_load_mbuf(void *, bus_dmamap_t, struct mbuf *, int);
172 int iommu_dvmamap_load_uio(void *, bus_dmamap_t, struct uio *, int);
173 int iommu_dvmamap_load_raw(void *, bus_dmamap_t, bus_dma_segment_t *,
174 int, bus_size_t, int);
175 void iommu_dvmamap_unload(void *, bus_dmamap_t);
176 void iommu_dvmamap_sync(void *, bus_dmamap_t, bus_addr_t, bus_size_t, int);
177 int iommu_dvmamem_alloc(void *, bus_size_t, bus_size_t, bus_size_t,
178 bus_dma_segment_t *, int, int *, int);
179 void iommu_dvmamem_free(void *, bus_dma_segment_t *, int);
180 int iommu_dvmamem_map(void *, bus_dma_segment_t *, int, size_t,
181 void **, int);
182 void iommu_dvmamem_unmap(void *, void *, size_t);
183 paddr_t iommu_dvmamem_mmap(void *, bus_dma_segment_t *, int, off_t, int, int);
184
185 void iommu_enter(struct astro_softc *, bus_addr_t, paddr_t, vaddr_t, int);
186 void iommu_remove(struct astro_softc *, bus_addr_t);
187
188 struct iommu_map_state *iommu_iomap_create(int);
189 void iommu_iomap_destroy(struct iommu_map_state *);
190 int iommu_iomap_insert_page(struct iommu_map_state *, vaddr_t, paddr_t);
191 bus_addr_t iommu_iomap_translate(struct iommu_map_state *, paddr_t);
192 void iommu_iomap_clear_pages(struct iommu_map_state *);
193
194 static int iommu_iomap_load_map(struct astro_softc *, bus_dmamap_t, int);
195
/*
 * Template DMA tag handed to child devices.  Positional initializers:
 * the order must match the member layout of struct hppa_bus_dma_tag.
 * The first member is the cookie; it is copied into sc_dmatag and
 * filled in with the softc at attach time.
 */
const struct hppa_bus_dma_tag astro_dmat = {
	NULL,
	iommu_dvmamap_create, iommu_dvmamap_destroy,
	iommu_dvmamap_load, iommu_dvmamap_load_mbuf,
	iommu_dvmamap_load_uio, iommu_dvmamap_load_raw,
	iommu_dvmamap_unload, iommu_dvmamap_sync,

	iommu_dvmamem_alloc, iommu_dvmamem_free, iommu_dvmamem_map,
	iommu_dvmamem_unmap, iommu_dvmamem_mmap
};
206
207 int
208 astro_match(device_t parent, cfdata_t cf, void *aux)
209 {
210 struct confargs *ca = aux;
211
212 /* Astro is a U-Turn variant. */
213 if (ca->ca_type.iodc_type != HPPA_TYPE_IOA ||
214 ca->ca_type.iodc_sv_model != HPPA_IOA_UTURN)
215 return 0;
216
217 if (ca->ca_type.iodc_model == 0x58 &&
218 ca->ca_type.iodc_revision >= 0x20)
219 return 1;
220
221 return 0;
222 }
223
224 void
225 astro_attach(device_t parent, device_t self, void *aux)
226 {
227 struct confargs *ca = aux, nca;
228 struct astro_softc *sc = device_private(self);
229 volatile struct astro_regs *r;
230 bus_space_handle_t ioh;
231 uint32_t rid, ioc_ctrl;
232 psize_t size;
233 vaddr_t va;
234 paddr_t pa;
235 void *p;
236 struct vm_page *m;
237 struct pglist mlist;
238 int iova_bits;
239 int pagezero_cookie;
240
241 sc->sc_dv = self;
242 sc->sc_dmat = ca->ca_dmatag;
243 if (bus_space_map(ca->ca_iot, ca->ca_hpa, sizeof(struct astro_regs),
244 0, &ioh)) {
245 aprint_error(": can't map IO space\n");
246 return;
247 }
248 p = bus_space_vaddr(ca->ca_iot, ioh);
249 sc->sc_regs = r = p;
250 rid = le32toh(r->rid);
251 aprint_normal(": Astro rev %d.%d\n", (rid & 7) + 1, (rid >> 3) & 3);
252
253 ioc_ctrl = le32toh(r->ioc_ctrl);
254 ioc_ctrl &= ~ASTRO_IOC_CTRL_CE;
255 ioc_ctrl &= ~ASTRO_IOC_CTRL_RM;
256 ioc_ctrl &= ~ASTRO_IOC_CTRL_NC;
257 r->ioc_ctrl = htole32(ioc_ctrl);
258
259 /*
260 * Setup the iommu.
261 */
262
263 /* XXX This gives us 256MB of iova space. */
264 iova_bits = 28;
265
266 r->tlb_ibase = htole32(0);
267 r->tlb_imask = htole32(0xffffffff << iova_bits);
268
269 /* Page size is 4K. */
270 r->tlb_tcnfg = htole32(0);
271
272 /* Flush TLB. */
273 r->tlb_pcom = htole32(31);
274
275 /*
276 * Allocate memory for I/O pagetables. They need to be physically
277 * contiguous.
278 */
279
280 size = (1 << (iova_bits - PAGE_SHIFT)) * sizeof(uint64_t);
281 TAILQ_INIT(&mlist);
282 if (uvm_pglistalloc(size, 0, -1, PAGE_SIZE, 0, &mlist, 1, 0) != 0) {
283 aprint_error(": can't allocate PDIR\n");
284 return;
285 }
286
287 va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
288
289 if (va == 0) {
290 aprint_error(": can't map PDIR\n");
291 return;
292 }
293 sc->sc_pdir = (uint64_t *)va;
294
295 m = TAILQ_FIRST(&mlist);
296 r->tlb_pdir_base = htole64(VM_PAGE_TO_PHYS(m));
297
298 /* Map the pages. */
299 for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
300 pa = VM_PAGE_TO_PHYS(m);
301 pmap_enter(pmap_kernel(), va, pa,
302 VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
303 va += PAGE_SIZE;
304 }
305 pmap_update(pmap_kernel());
306 memset(sc->sc_pdir, 0, size);
307
308 /*
309 * The PDC might have set up some devices to do DMA. It will do
310 * this for the onboard USB controller if an USB keyboard is used
311 * for console input. In that case, bad things will happen if we
312 * enable iova space. So reset the PDC devices before we do that.
313 * Don't do this if we're using a serial console though, since it
314 * will stop working if we do. This is fine since the serial port
315 * doesn't do DMA.
316 */
317 pagezero_cookie = hppa_pagezero_map();
318 if (PAGE0->mem_cons.pz_class != PCL_DUPLEX)
319 pdcproc_ioreset();
320 hppa_pagezero_unmap(pagezero_cookie);
321
322 /* Enable iova space. */
323 r->tlb_ibase = htole32(1);
324
325 /*
326 * Now all the hardware's working we need to allocate a dvma map.
327 */
328 snprintf(sc->sc_dvmamapname, sizeof(sc->sc_dvmamapname),
329 "%s_dvma", device_xname(sc->sc_dv));
330 sc->sc_dvmamap = extent_create(sc->sc_dvmamapname, 0, (1 << iova_bits),
331 0, 0, EX_WAITOK);
332
333 sc->sc_dmatag = astro_dmat;
334 sc->sc_dmatag._cookie = sc;
335
336 nca = *ca; /* clone from us */
337 nca.ca_dmatag = &sc->sc_dmatag;
338 nca.ca_hpabase = IOMOD_IO_IO_LOW(p);
339 nca.ca_nmodules = MAXMODBUS;
340 pdc_scanbus(self, &nca, astro_callback);
341 }
342
/*
 * Attach callback for pdc_scanbus(): attach each discovered module as
 * a child on the "gedoens" interface attribute, using the standard
 * mainbus print/submatch helpers.
 */
static device_t
astro_callback(device_t self, struct confargs *ca)
{

	return config_found(self, ca, mbprint,
	    CFARG_SUBMATCH, mbsubmatch,
	    CFARG_IATTR, "gedoens",
	    CFARG_EOL);
}
352
353 int
354 iommu_dvmamap_create(void *v, bus_size_t size, int nsegments,
355 bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
356 {
357 struct astro_softc *sc = v;
358 bus_dmamap_t map;
359 struct iommu_map_state *ims;
360 int error;
361
362 error = bus_dmamap_create(sc->sc_dmat, size, nsegments, maxsegsz,
363 boundary, flags, &map);
364 if (error)
365 return (error);
366
367 ims = iommu_iomap_create(atop(round_page(size)));
368 if (ims == NULL) {
369 bus_dmamap_destroy(sc->sc_dmat, map);
370 return (ENOMEM);
371 }
372
373 ims->ims_sc = sc;
374 map->_dm_cookie = ims;
375 *dmamap = map;
376
377 return (0);
378 }
379
380 void
381 iommu_dvmamap_destroy(void *v, bus_dmamap_t map)
382 {
383 struct astro_softc *sc = v;
384
385 /*
386 * The specification (man page) requires a loaded
387 * map to be unloaded before it is destroyed.
388 */
389 if (map->dm_nsegs)
390 iommu_dvmamap_unload(sc, map);
391
392 if (map->_dm_cookie)
393 iommu_iomap_destroy(map->_dm_cookie);
394 map->_dm_cookie = NULL;
395
396 bus_dmamap_destroy(sc->sc_dmat, map);
397 }
398
399 static int
400 iommu_iomap_load_map(struct astro_softc *sc, bus_dmamap_t map, int flags)
401 {
402 struct iommu_map_state *ims = map->_dm_cookie;
403 struct iommu_page_map *ipm = &ims->ims_map;
404 struct iommu_page_entry *e;
405 int err, seg, s;
406 paddr_t pa, paend;
407 vaddr_t va;
408 bus_size_t sgsize;
409 bus_size_t align, boundary;
410 u_long dvmaddr;
411 bus_addr_t dva;
412 int i;
413
414 /* XXX */
415 boundary = map->_dm_boundary;
416 align = PAGE_SIZE;
417
418 iommu_iomap_clear_pages(ims);
419
420 for (seg = 0; seg < map->dm_nsegs; seg++) {
421 struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
422
423 paend = round_page(ds->ds_addr + ds->ds_len);
424 for (pa = trunc_page(ds->ds_addr), va = trunc_page(ds->_ds_va);
425 pa < paend; pa += PAGE_SIZE, va += PAGE_SIZE) {
426 err = iommu_iomap_insert_page(ims, va, pa);
427 if (err) {
428 printf("iomap insert error: %d for "
429 "va 0x%lx pa 0x%lx\n", err, va, pa);
430 bus_dmamap_unload(sc->sc_dmat, map);
431 iommu_iomap_clear_pages(ims);
432 }
433 }
434 }
435
436 sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;
437 /* XXXNH */
438 s = splhigh();
439 err = extent_alloc(sc->sc_dvmamap, sgsize, align, boundary,
440 EX_NOWAIT | EX_BOUNDZERO, &dvmaddr);
441 splx(s);
442 if (err)
443 return (err);
444
445 ims->ims_dvmastart = dvmaddr;
446 ims->ims_dvmasize = sgsize;
447
448 dva = dvmaddr;
449 for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e) {
450 e->ipe_dva = dva;
451 iommu_enter(sc, e->ipe_dva, e->ipe_pa, e->ipe_va, flags);
452 dva += PAGE_SIZE;
453 }
454
455 for (seg = 0; seg < map->dm_nsegs; seg++) {
456 struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
457 ds->ds_addr = iommu_iomap_translate(ims, ds->ds_addr);
458 }
459
460 return (0);
461 }
462
463 int
464 iommu_dvmamap_load(void *v, bus_dmamap_t map, void *addr, bus_size_t size,
465 struct proc *p, int flags)
466 {
467 struct astro_softc *sc = v;
468 int err;
469
470 err = bus_dmamap_load(sc->sc_dmat, map, addr, size, p, flags);
471 if (err)
472 return (err);
473
474 return iommu_iomap_load_map(sc, map, flags);
475 }
476
477 int
478 iommu_dvmamap_load_mbuf(void *v, bus_dmamap_t map, struct mbuf *m, int flags)
479 {
480 struct astro_softc *sc = v;
481 int err;
482
483 err = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, flags);
484 if (err)
485 return (err);
486
487 return iommu_iomap_load_map(sc, map, flags);
488 }
489
/*
 * Load a uio.  NOTE(review): unlike the load/load_mbuf paths this does
 * NOT call iommu_iomap_load_map(), so the segments are left untranslated
 * — apparently unimplemented/unused; the printf looks like a debug
 * tracer left in to flag any caller.
 */
int
iommu_dvmamap_load_uio(void *v, bus_dmamap_t map, struct uio *uio, int flags)
{
	struct astro_softc *sc = v;

	printf("load_uio\n");

	return (bus_dmamap_load_uio(sc->sc_dmat, map, uio, flags));
}
499
/*
 * Load raw memory segments.  NOTE(review): as with load_uio above, the
 * IOMMU translation step is skipped — apparently unimplemented/unused;
 * the printf looks like a debug tracer left in to flag any caller.
 */
int
iommu_dvmamap_load_raw(void *v, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	struct astro_softc *sc = v;

	printf("load_raw\n");

	return (bus_dmamap_load_raw(sc->sc_dmat, map, segs, nsegs, size, flags));
}
510
511 void
512 iommu_dvmamap_unload(void *v, bus_dmamap_t map)
513 {
514 struct astro_softc *sc = v;
515 struct iommu_map_state *ims = map->_dm_cookie;
516 struct iommu_page_map *ipm = &ims->ims_map;
517 struct iommu_page_entry *e;
518 int err, i, s;
519
520 /* Remove the IOMMU entries. */
521 for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e)
522 iommu_remove(sc, e->ipe_dva);
523
524 /* Clear the iomap. */
525 iommu_iomap_clear_pages(ims);
526
527 bus_dmamap_unload(sc->sc_dmat, map);
528
529 s = splhigh();
530 err = extent_free(sc->sc_dvmamap, ims->ims_dvmastart,
531 ims->ims_dvmasize, EX_NOWAIT);
532 ims->ims_dvmastart = 0;
533 ims->ims_dvmasize = 0;
534 splx(s);
535 if (err)
536 printf("warning: %ld of DVMA space lost\n", ims->ims_dvmasize);
537 }
538
539 void
540 iommu_dvmamap_sync(void *v, bus_dmamap_t map, bus_addr_t off,
541 bus_size_t len, int ops)
542 {
543 /* Nothing to do; DMA is cache-coherent. */
544 }
545
546 int
547 iommu_dvmamem_alloc(void *v, bus_size_t size, bus_size_t alignment,
548 bus_size_t boundary, bus_dma_segment_t *segs,
549 int nsegs, int *rsegs, int flags)
550 {
551 struct astro_softc *sc = v;
552
553 return (bus_dmamem_alloc(sc->sc_dmat, size, alignment, boundary,
554 segs, nsegs, rsegs, flags));
555 }
556
557 void
558 iommu_dvmamem_free(void *v, bus_dma_segment_t *segs, int nsegs)
559 {
560 struct astro_softc *sc = v;
561
562 bus_dmamem_free(sc->sc_dmat, segs, nsegs);
563 }
564
565 int
566 iommu_dvmamem_map(void *v, bus_dma_segment_t *segs, int nsegs, size_t size,
567 void **kvap, int flags)
568 {
569 struct astro_softc *sc = v;
570
571 return (bus_dmamem_map(sc->sc_dmat, segs, nsegs, size, kvap, flags));
572 }
573
574 void
575 iommu_dvmamem_unmap(void *v, void *kva, size_t size)
576 {
577 struct astro_softc *sc = v;
578
579 bus_dmamem_unmap(sc->sc_dmat, kva, size);
580 }
581
582 paddr_t
583 iommu_dvmamem_mmap(void *v, bus_dma_segment_t *segs, int nsegs, off_t off,
584 int prot, int flags)
585 {
586 struct astro_softc *sc = v;
587
588 return (bus_dmamem_mmap(sc->sc_dmat, segs, nsegs, off, prot, flags));
589 }
590
591 /*
592 * Utility function used by splay tree to order page entries by pa.
593 */
594 static inline int
595 iomap_compare(struct iommu_page_entry *a, struct iommu_page_entry *b)
596 {
597 return ((a->ipe_pa > b->ipe_pa) ? 1 :
598 (a->ipe_pa < b->ipe_pa) ? -1 : 0);
599 }
600
601 SPLAY_PROTOTYPE(iommu_page_tree, iommu_page_entry, ipe_node, iomap_compare);
602
603 SPLAY_GENERATE(iommu_page_tree, iommu_page_entry, ipe_node, iomap_compare);
604
605 /*
606 * Create a new iomap.
607 */
608 struct iommu_map_state *
609 iommu_iomap_create(int n)
610 {
611 struct iommu_map_state *ims;
612
613 /* Safety for heavily fragmented data, such as mbufs */
614 n += 4;
615 if (n < 16)
616 n = 16;
617
618 ims = malloc(sizeof(*ims) + (n - 1) * sizeof(ims->ims_map.ipm_map[0]),
619 M_DEVBUF, M_NOWAIT | M_ZERO);
620 if (ims == NULL)
621 return (NULL);
622
623 /* Initialize the map. */
624 ims->ims_map.ipm_maxpage = n;
625 SPLAY_INIT(&ims->ims_map.ipm_tree);
626
627 return (ims);
628 }
629
630 /*
631 * Destroy an iomap.
632 */
633 void
634 iommu_iomap_destroy(struct iommu_map_state *ims)
635 {
636 #ifdef DIAGNOSTIC
637 if (ims->ims_map.ipm_pagecnt > 0)
638 printf("iommu_iomap_destroy: %d page entries in use\n",
639 ims->ims_map.ipm_pagecnt);
640 #endif
641
642 free(ims, M_DEVBUF);
643 }
644
645 /*
646 * Insert a pa entry in the iomap.
647 */
648 int
649 iommu_iomap_insert_page(struct iommu_map_state *ims, vaddr_t va, paddr_t pa)
650 {
651 struct iommu_page_map *ipm = &ims->ims_map;
652 struct iommu_page_entry *e;
653
654 if (ipm->ipm_pagecnt >= ipm->ipm_maxpage) {
655 struct iommu_page_entry ipe;
656
657 ipe.ipe_pa = pa;
658 if (SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &ipe))
659 return (0);
660
661 return (ENOMEM);
662 }
663
664 e = &ipm->ipm_map[ipm->ipm_pagecnt];
665
666 e->ipe_pa = pa;
667 e->ipe_va = va;
668 e->ipe_dva = 0;
669
670 e = SPLAY_INSERT(iommu_page_tree, &ipm->ipm_tree, e);
671
672 /* Duplicates are okay, but only count them once. */
673 if (e)
674 return (0);
675
676 ++ipm->ipm_pagecnt;
677
678 return (0);
679 }
680
681 /*
682 * Translate a physical address (pa) into a DVMA address.
683 */
684 bus_addr_t
685 iommu_iomap_translate(struct iommu_map_state *ims, paddr_t pa)
686 {
687 struct iommu_page_map *ipm = &ims->ims_map;
688 struct iommu_page_entry *e;
689 struct iommu_page_entry pe;
690 paddr_t offset = pa & PAGE_MASK;
691
692 pe.ipe_pa = trunc_page(pa);
693
694 e = SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &pe);
695
696 if (e == NULL) {
697 panic("couldn't find pa %lx\n", pa);
698 return 0;
699 }
700
701 return (e->ipe_dva | offset);
702 }
703
704 /*
705 * Clear the iomap table and tree.
706 */
707 void
708 iommu_iomap_clear_pages(struct iommu_map_state *ims)
709 {
710 ims->ims_map.ipm_pagecnt = 0;
711 SPLAY_INIT(&ims->ims_map.ipm_tree);
712 }
713
714 /*
715 * Add an entry to the IOMMU table.
716 */
/*
 * Add an entry to the IOMMU table: build a little-endian IOTTE for
 * (dva -> pa) and flush it out of the data cache so the hardware's
 * table walk sees it.
 */
void
iommu_enter(struct astro_softc *sc, bus_addr_t dva, paddr_t pa, vaddr_t va,
    int flags)
{
	/* One 64-bit IOTTE per IOVA page. */
	volatile uint64_t *tte_ptr = &sc->sc_pdir[dva >> PAGE_SHIFT];
	uint64_t tte;
	uint32_t ci;

#ifdef ASTRODEBUG
	printf("iommu_enter dva %lx, pa %lx, va %lx\n", dva, pa, va);
#endif

#ifdef DIAGNOSTIC
	/* Catch double-mapping of the same DVMA page. */
	tte = le64toh(*tte_ptr);

	if (tte & IOTTE_V) {
		printf("Overwriting valid tte entry (dva %lx pa %lx "
		    "&tte %p tte %llx)\n", dva, pa, tte_ptr, tte);
		extent_print(sc->sc_dvmamap);
		panic("IOMMU overwrite");
	}
#endif

	/* Coherence index for the kernel VA backing this page. */
	ci = lci(HPPA_SID_KERNEL, va);

	/* Page frame plus low 8 bits of the shifted coherence index. */
	tte = (pa & IOTTE_PAMASK) | ((ci >> 12) & IOTTE_CI);
	tte |= IOTTE_V;

	*tte_ptr = htole64(tte);
	/* Push the entry to memory for the IOMMU's table walker. */
	fdcache(HPPA_SID_KERNEL, (vaddr_t)tte_ptr, sizeof(*tte_ptr));
}
748
749 /*
750 * Remove an entry from the IOMMU table.
751 */
/*
 * Remove an entry from the IOMMU table: clear the valid bit in the
 * IOTTE and purge the corresponding translation from the I/O TLB.
 */
void
iommu_remove(struct astro_softc *sc, bus_addr_t dva)
{
	volatile struct astro_regs *r = sc->sc_regs;
	uint64_t *tte_ptr = &sc->sc_pdir[dva >> PAGE_SHIFT];
	uint64_t tte;

#ifdef DIAGNOSTIC
	if (dva != trunc_page(dva)) {
		printf("iommu_remove: unaligned dva: %lx\n", dva);
		dva = trunc_page(dva);
	}
#endif

	tte = le64toh(*tte_ptr);

#ifdef DIAGNOSTIC
	/* Catch removal of a page that was never (or already un-) mapped. */
	if ((tte & IOTTE_V) == 0) {
		printf("Removing invalid tte entry (dva %lx &tte %p "
		    "tte %llx)\n", dva, tte_ptr, tte);
		extent_print(sc->sc_dvmamap);
		panic("IOMMU remove overwrite");
	}
#endif

	*tte_ptr = htole64(tte & ~IOTTE_V);

	/*
	 * Flush IOMMU.  NOTE(review): the low bits of tlb_pcom appear to
	 * encode the purge size as log2 (PAGE_SHIFT == one page) — confirm
	 * against the Astro ERS.
	 */
	r->tlb_pcom = htole32(dva | PAGE_SHIFT);
}
782