Annotation of sys/arch/hppa/dev/astro.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: astro.c,v 1.7 2007/07/01 12:53:52 kettenis Exp $ */
2:
3: /*
4: * Copyright (c) 2007 Mark Kettenis
5: *
6: * Permission to use, copy, modify, and distribute this software for any
7: * purpose with or without fee is hereby granted, provided that the above
8: * copyright notice and this permission notice appear in all copies.
9: *
10: * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11: * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12: * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13: * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14: * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15: * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16: * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17: */
18:
19: #include <sys/param.h>
20: #include <sys/systm.h>
21: #include <sys/device.h>
22: #include <sys/extent.h>
23: #include <sys/malloc.h>
24: #include <sys/reboot.h>
25: #include <sys/tree.h>
26:
27: #include <uvm/uvm_extern.h>
28:
29: #include <machine/iomod.h>
30: #include <machine/autoconf.h>
31:
32: #include <hppa/dev/cpudevs.h>
33:
/*
 * Astro memory-mapped register layout.  Register byte offsets are
 * encoded by the resv* padding expressions (e.g. resv1 ends at offset
 * 0x0300, where the LMMIO direct range registers begin).
 */
struct astro_regs {
	u_int32_t rid;			/* 0x0000: chip id/revision, see astro_attach() */
	u_int32_t pad0000;
	u_int32_t ioc_ctrl;		/* 0x0008: IOC control, ASTRO_IOC_CTRL_* bits */
	u_int32_t pad0008;
	u_int8_t resv1[0x0300 - 0x0010];
	u_int64_t lmmio_direct0_base;	/* 0x0300: LMMIO direct ranges 0-3 */
	u_int64_t lmmio_direct0_mask;
	u_int64_t lmmio_direct0_route;
	u_int64_t lmmio_direct1_base;
	u_int64_t lmmio_direct1_mask;
	u_int64_t lmmio_direct1_route;
	u_int64_t lmmio_direct2_base;
	u_int64_t lmmio_direct2_mask;
	u_int64_t lmmio_direct2_route;
	u_int64_t lmmio_direct3_base;
	u_int64_t lmmio_direct3_mask;
	u_int64_t lmmio_direct3_route;
	u_int64_t lmmio_dist_base;	/* 0x0360: LMMIO distributed range */
	u_int64_t lmmio_dist_mask;
	u_int64_t lmmio_dist_route;
	u_int64_t gmmio_dist_base;	/* 0x0378: GMMIO distributed range */
	u_int64_t gmmio_dist_mask;
	u_int64_t gmmio_dist_route;
	u_int64_t ios_dist_base;	/* 0x0390: I/O space distributed range */
	u_int64_t ios_dist_mask;
	u_int64_t ios_dist_route;
	u_int8_t resv2[0x03c0 - 0x03a8];
	u_int64_t ios_direct_base;	/* 0x03c0: I/O space direct range */
	u_int64_t ios_direct_mask;
	u_int64_t ios_direct_route;
	u_int8_t resv3[0x22000 - 0x03d8];
	u_int64_t func_id;		/* 0x22000 */
	u_int64_t func_class;
	u_int8_t resv4[0x22040 - 0x22010];
	u_int64_t rope_config;		/* 0x22040 */
	u_int8_t resv5[0x22050 - 0x22048];
	u_int64_t rope_debug;		/* 0x22050 */
	u_int8_t resv6[0x22200 - 0x22058];
	u_int64_t rope0_control;	/* 0x22200: per-rope (I/O bus) controls */
	u_int64_t rope1_control;
	u_int64_t rope2_control;
	u_int64_t rope3_control;
	u_int64_t rope4_control;
	u_int64_t rope5_control;
	u_int64_t rope6_control;
	u_int64_t rope7_control;
	u_int8_t resv7[0x22300 - 0x22240];
	u_int32_t tlb_ibase;		/* 0x22300: IOMMU TLB registers */
	u_int32_t pad22300;
	u_int32_t tlb_imask;
	u_int32_t pad22308;
	u_int32_t tlb_pcom;		/* 0x22310: purge command */
	u_int32_t pad22310;
	u_int32_t tlb_tcnfg;
	u_int32_t pad22318;
	u_int64_t tlb_pdir_base;	/* 0x22320: I/O pagetable base (PA) */
};
92:
/* ioc_ctrl register bits. */
#define ASTRO_IOC_CTRL_TE	0x0001	/* TOC Enable */
#define ASTRO_IOC_CTRL_CE	0x0002	/* Coalesce Enable */
#define ASTRO_IOC_CTRL_DE	0x0004	/* Dillon Enable */
#define ASTRO_IOC_CTRL_IE	0x0008	/* IOS Enable */
#define ASTRO_IOC_CTRL_OS	0x0010	/* Outbound Synchronous */
#define ASTRO_IOC_CTRL_IS	0x0020	/* Inbound Synchronous */
#define ASTRO_IOC_CTRL_RC	0x0040	/* Read Current Enable */
#define ASTRO_IOC_CTRL_L0	0x0080	/* 0-length Read Enable */
#define ASTRO_IOC_CTRL_RM	0x0100	/* Real Mode */
#define ASTRO_IOC_CTRL_NC	0x0200	/* Non-coherent Mode */
#define ASTRO_IOC_CTRL_ID	0x0400	/* Interrupt Disable */
#define ASTRO_IOC_CTRL_D4	0x0800	/* Disable 4-byte Coalescing */
#define ASTRO_IOC_CTRL_CC	0x1000	/* Increase Coalescing counter value */
#define ASTRO_IOC_CTRL_DD	0x2000	/* Disable distr. range coalescing */
#define ASTRO_IOC_CTRL_DC	0x4000	/* Disable the coalescing counter */

/* I/O TLB translation entry (IOTTE) fields; built in iommu_enter(). */
#define IOTTE_V		0x8000000000000000LL	/* Entry valid */
#define IOTTE_PAMASK	0x000000fffffff000LL	/* Physical page frame bits */
#define IOTTE_CI	0x00000000000000ffLL	/* Coherent index */
112:
/* Per-instance state for one Astro memory/I/O controller. */
struct astro_softc {
	struct device sc_dv;

	bus_dma_tag_t sc_dmat;		/* parent tag; does the real work */
	struct astro_regs volatile *sc_regs;	/* chip registers */
	u_int64_t *sc_pdir;		/* I/O pagetable (KVA) */

	char sc_dvmamapname[20];	/* name for the extent below */
	struct extent *sc_dvmamap;	/* DVMA address space arena */
	struct hppa_bus_dma_tag sc_dmatag;	/* tag handed to children */
};
124:
/*
 * per-map DVMA page table
 */
struct iommu_page_entry {
	SPLAY_ENTRY(iommu_page_entry) ipe_node;	/* keyed on ipe_pa */
	paddr_t	ipe_pa;		/* physical page address */
	vaddr_t	ipe_va;		/* kernel virtual alias (for coherence index) */
	bus_addr_t ipe_dva;	/* assigned DVMA address */
};

struct iommu_page_map {
	SPLAY_HEAD(iommu_page_tree, iommu_page_entry) ipm_tree;
	int ipm_maxpage;	/* Size of allocated page map */
	int ipm_pagecnt;	/* Number of entries in use */
	/* Variable-length trailing array; sized in iommu_iomap_create(). */
	struct iommu_page_entry	ipm_map[1];
};

/*
 * per-map IOMMU state
 */
struct iommu_map_state {
	struct astro_softc *ims_sc;
	bus_addr_t ims_dvmastart;	/* extent allocation for this map */
	bus_size_t ims_dvmasize;
	struct iommu_page_map ims_map;	/* map must be last (array at end) */
};
151:
152: int astro_match(struct device *, void *, void *);
153: void astro_attach(struct device *, struct device *, void *);
154:
/* Autoconf attachment glue. */
struct cfattach astro_ca = {
	sizeof(struct astro_softc), astro_match, astro_attach
};

struct cfdriver astro_cd = {
	NULL, "astro", DV_DULL
};
162:
163: int iommu_dvmamap_create(void *, bus_size_t, int, bus_size_t, bus_size_t,
164: int, bus_dmamap_t *);
165: void iommu_dvmamap_destroy(void *, bus_dmamap_t);
166: int iommu_dvmamap_load(void *, bus_dmamap_t, void *, bus_size_t,
167: struct proc *, int);
168: int iommu_dvmamap_load_mbuf(void *, bus_dmamap_t, struct mbuf *, int);
169: int iommu_dvmamap_load_uio(void *, bus_dmamap_t, struct uio *, int);
170: int iommu_dvmamap_load_raw(void *, bus_dmamap_t, bus_dma_segment_t *,
171: int, bus_size_t, int);
172: void iommu_dvmamap_unload(void *, bus_dmamap_t);
173: void iommu_dvmamap_sync(void *, bus_dmamap_t, bus_addr_t, bus_size_t, int);
174: int iommu_dvmamem_alloc(void *, bus_size_t, bus_size_t, bus_size_t,
175: bus_dma_segment_t *, int, int *, int);
176: void iommu_dvmamem_free(void *, bus_dma_segment_t *, int);
177: int iommu_dvmamem_map(void *, bus_dma_segment_t *, int, size_t,
178: caddr_t *, int);
179: void iommu_dvmamem_unmap(void *, caddr_t, size_t);
180: paddr_t iommu_dvmamem_mmap(void *, bus_dma_segment_t *, int, off_t, int, int);
181:
182: void iommu_enter(struct astro_softc *, bus_addr_t, paddr_t, vaddr_t, int);
183: void iommu_remove(struct astro_softc *, bus_addr_t);
184:
185: struct iommu_map_state *iommu_iomap_create(int);
186: void iommu_iomap_destroy(struct iommu_map_state *);
187: int iommu_iomap_insert_page(struct iommu_map_state *, vaddr_t, paddr_t);
188: bus_addr_t iommu_iomap_translate(struct iommu_map_state *, paddr_t);
189: void iommu_iomap_clear_pages(struct iommu_map_state *);
190:
/*
 * Template bus_dma tag routing DMA through the IOMMU.  astro_attach()
 * copies it into sc_dmatag and fills in the _cookie (first slot, NULL
 * here) with the softc before handing it to child devices.
 */
const struct hppa_bus_dma_tag astro_dmat = {
	NULL,
	iommu_dvmamap_create, iommu_dvmamap_destroy,
	iommu_dvmamap_load, iommu_dvmamap_load_mbuf,
	iommu_dvmamap_load_uio, iommu_dvmamap_load_raw,
	iommu_dvmamap_unload, iommu_dvmamap_sync,

	iommu_dvmamem_alloc, iommu_dvmamem_free, iommu_dvmamem_map,
	iommu_dvmamem_unmap, iommu_dvmamem_mmap
};
201:
202: int
203: astro_match(struct device *parent, void *cfdata, void *aux)
204: {
205: struct confargs *ca = aux;
206:
207: /* Astro is a U-Turn variant. */
208: if (ca->ca_type.iodc_type != HPPA_TYPE_IOA ||
209: ca->ca_type.iodc_sv_model != HPPA_IOA_UTURN)
210: return 0;
211:
212: if (ca->ca_type.iodc_model == 0x58 &&
213: ca->ca_type.iodc_revision >= 0x20)
214: return 1;
215:
216: return 0;
217: }
218:
/*
 * Attach: map the chip, configure the IOC, program the IOMMU and its
 * I/O pagetable, create the DVMA arena, and attach child busses with
 * a DMA tag that routes everything through the IOMMU.
 */
void
astro_attach(struct device *parent, struct device *self, void *aux)
{
	struct confargs *ca = aux, nca;
	struct astro_softc *sc = (struct astro_softc *)self;
	volatile struct astro_regs *r;
	bus_space_handle_t ioh;
	u_int32_t rid, ioc_ctrl;
	psize_t size;
	vaddr_t va;
	paddr_t pa;
	struct vm_page *m;
	struct pglist mlist;
	int iova_bits;

	sc->sc_dmat = ca->ca_dmatag;
	if (bus_space_map(ca->ca_iot, ca->ca_hpa, sizeof(struct astro_regs),
	    0, &ioh)) {
		printf(": can't map IO space\n");
		return;
	}
	/*
	 * NOTE(review): the hpa is used directly instead of the handle
	 * obtained from bus_space_map() above — presumably equivalently
	 * mapped on hppa; confirm.
	 */
	sc->sc_regs = r = (struct astro_regs *)ca->ca_hpa;

	rid = letoh32(r->rid);
	printf(": Astro rev %d.%d\n", (rid & 7) + 1, (rid >> 3) & 3);

	/* Turn off coalescing, real mode and non-coherent mode. */
	ioc_ctrl = letoh32(r->ioc_ctrl);
	ioc_ctrl &= ~ASTRO_IOC_CTRL_CE;
	ioc_ctrl &= ~ASTRO_IOC_CTRL_RM;
	ioc_ctrl &= ~ASTRO_IOC_CTRL_NC;
	r->ioc_ctrl = htole32(ioc_ctrl);

	/*
	 * Setup the iommu.
	 */

	/* XXX This gives us 256MB of iova space. */
	iova_bits = 28;

	/* iova space starts at 0; mask selects the bits outside it. */
	r->tlb_ibase = htole32(0);
	r->tlb_imask = htole32(0xffffffff << iova_bits);

	/* Page size is 4K. */
	r->tlb_tcnfg = htole32(0);

	/*
	 * Flush TLB.  The low bits of a pcom write apparently encode
	 * log2 of the range size, so 31 purges everything — cf. the
	 * single-page purge in iommu_remove().
	 */
	r->tlb_pcom = htole32(31);

	/*
	 * Allocate memory for I/O pagetables.  They need to be physically
	 * contiguous.
	 */

	/* One 64-bit entry per iova page. */
	size = (1 << (iova_bits - PAGE_SHIFT)) * sizeof(u_int64_t);
	TAILQ_INIT(&mlist);
	if (uvm_pglistalloc(size, 0, -1, PAGE_SIZE, 0, &mlist, 1, 0) != 0)
		panic("astrottach: no memory");	/* XXX typo: "astrottach" */

	va = uvm_km_valloc(kernel_map, size);
	if (va == 0)
		panic("astroattach: no memory");
	sc->sc_pdir = (u_int64_t *)va;

	/* Hand the chip the physical base of the pagetable. */
	m = TAILQ_FIRST(&mlist);
	r->tlb_pdir_base = htole64(VM_PAGE_TO_PHYS(m));

	/* Map the pages. */
	for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
		pa = VM_PAGE_TO_PHYS(m);
		pmap_enter(pmap_kernel(), va, pa,
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	memset(sc->sc_pdir, 0, size);

	/*
	 * The PDC might have set up some devices to do DMA.  It will do
	 * this for the onboard USB controller if an USB keyboard is used
	 * for console input.  In that case, bad things will happen if we
	 * enable iova space.  So reset the PDC devices before we do that.
	 * Don't do this if we're using a serial console though, since it
	 * will stop working if we do.  This is fine since the serial port
	 * doesn't do DMA.
	 */
	if (PAGE0->mem_cons.pz_class != PCL_DUPLEX)
		pdc_call((iodcio_t)pdc, 0, PDC_IO, PDC_IO_RESET_DEVICES);

	/* Enable iova space. */
	r->tlb_ibase = htole32(1);

	/*
	 * Now all the hardware's working we need to allocate a dvma map.
	 * NOTE(review): extent ends are inclusive, so (1 << iova_bits)
	 * extends one byte past the iova space; (1 << iova_bits) - 1 may
	 * be intended — confirm.
	 */
	snprintf(sc->sc_dvmamapname, sizeof(sc->sc_dvmamapname),
	    "%s_dvma", sc->sc_dv.dv_xname);
	sc->sc_dvmamap = extent_create(sc->sc_dvmamapname, 0, (1 << iova_bits),
	    M_DEVBUF, 0, 0, EX_NOWAIT);

	sc->sc_dmatag = astro_dmat;
	sc->sc_dmatag._cookie = sc;

	nca = *ca;	/* clone from us */
	nca.ca_hpamask = HPPA_IOBEGIN;
	nca.ca_dmatag = &sc->sc_dmatag;
	pdc_scanbus(self, &nca, MAXMODBUS, 0);
}
326:
327: int
328: iommu_dvmamap_create(void *v, bus_size_t size, int nsegments,
329: bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
330: {
331: struct astro_softc *sc = v;
332: bus_dmamap_t map;
333: struct iommu_map_state *ims;
334: int error;
335:
336: error = bus_dmamap_create(sc->sc_dmat, size, nsegments, maxsegsz,
337: boundary, flags, &map);
338: if (error)
339: return (error);
340:
341: ims = iommu_iomap_create(atop(round_page(size)));
342: if (ims == NULL) {
343: bus_dmamap_destroy(sc->sc_dmat, map);
344: return (ENOMEM);
345: }
346:
347: ims->ims_sc = sc;
348: map->_dm_cookie = ims;
349: *dmamap = map;
350:
351: return (0);
352: }
353:
354: void
355: iommu_dvmamap_destroy(void *v, bus_dmamap_t map)
356: {
357: struct astro_softc *sc = v;
358:
359: /*
360: * The specification (man page) requires a loaded
361: * map to be unloaded before it is destroyed.
362: */
363: if (map->dm_nsegs)
364: iommu_dvmamap_unload(sc, map);
365:
366: if (map->_dm_cookie)
367: iommu_iomap_destroy(map->_dm_cookie);
368: map->_dm_cookie = NULL;
369:
370: bus_dmamap_destroy(sc->sc_dmat, map);
371: }
372:
373: int
374: iommu_iomap_load_map(struct astro_softc *sc, bus_dmamap_t map, int flags)
375: {
376: struct iommu_map_state *ims = map->_dm_cookie;
377: struct iommu_page_map *ipm = &ims->ims_map;
378: struct iommu_page_entry *e;
379: int err, seg, s;
380: paddr_t pa, paend;
381: vaddr_t va;
382: bus_size_t sgsize;
383: bus_size_t align, boundary;
384: u_long dvmaddr;
385: bus_addr_t dva;
386: int i;
387:
388: /* XXX */
389: boundary = map->_dm_boundary;
390: align = PAGE_SIZE;
391:
392: iommu_iomap_clear_pages(ims);
393:
394: for (seg = 0; seg < map->dm_nsegs; seg++) {
395: struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
396:
397: paend = round_page(ds->ds_addr + ds->ds_len);
398: for (pa = trunc_page(ds->ds_addr), va = trunc_page(ds->_ds_va);
399: pa < paend; pa += PAGE_SIZE, va += PAGE_SIZE) {
400: err = iommu_iomap_insert_page(ims, va, pa);
401: if (err) {
402: printf("iomap insert error: %d for "
403: "va 0x%lx pa 0x%lx\n", err, va, pa);
404: bus_dmamap_unload(sc->sc_dmat, map);
405: iommu_iomap_clear_pages(ims);
406: }
407: }
408: }
409:
410: sgsize = ims->ims_map.ipm_pagecnt * PAGE_SIZE;
411: s = splhigh();
412: err = extent_alloc(sc->sc_dvmamap, sgsize, align, 0, boundary,
413: EX_NOWAIT | EX_BOUNDZERO, &dvmaddr);
414: splx(s);
415: if (err)
416: return (err);
417:
418: ims->ims_dvmastart = dvmaddr;
419: ims->ims_dvmasize = sgsize;
420:
421: dva = dvmaddr;
422: for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e) {
423: e->ipe_dva = dva;
424: iommu_enter(sc, e->ipe_dva, e->ipe_pa, e->ipe_va, flags);
425: dva += PAGE_SIZE;
426: }
427:
428: for (seg = 0; seg < map->dm_nsegs; seg++) {
429: struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
430: ds->ds_addr = iommu_iomap_translate(ims, ds->ds_addr);
431: }
432:
433: return (0);
434: }
435:
436: int
437: iommu_dvmamap_load(void *v, bus_dmamap_t map, void *addr, bus_size_t size,
438: struct proc *p, int flags)
439: {
440: struct astro_softc *sc = v;
441: int err;
442:
443: err = bus_dmamap_load(sc->sc_dmat, map, addr, size, p, flags);
444: if (err)
445: return (err);
446:
447: return iommu_iomap_load_map(sc, map, flags);
448: }
449:
450: int
451: iommu_dvmamap_load_mbuf(void *v, bus_dmamap_t map, struct mbuf *m, int flags)
452: {
453: struct astro_softc *sc = v;
454: int err;
455:
456: err = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, flags);
457: if (err)
458: return (err);
459:
460: return iommu_iomap_load_map(sc, map, flags);
461: }
462:
/*
 * XXX Pure passthrough: unlike load/load_mbuf this never calls
 * iommu_iomap_load_map(), so the segments are NOT translated through
 * the IOMMU.  The unconditional printf suggests this path is not
 * expected to be exercised yet.
 */
int
iommu_dvmamap_load_uio(void *v, bus_dmamap_t map, struct uio *uio, int flags)
{
	struct astro_softc *sc = v;

	printf("load_uio\n");

	return (bus_dmamap_load_uio(sc->sc_dmat, map, uio, flags));
}
472:
/*
 * XXX Pure passthrough, same caveat as load_uio: the raw segments are
 * NOT translated through the IOMMU.  (They also carry no _ds_va, so
 * iommu_iomap_load_map() could not be reused as-is.)
 */
int
iommu_dvmamap_load_raw(void *v, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	struct astro_softc *sc = v;

	printf("load_raw\n");

	return (bus_dmamap_load_raw(sc->sc_dmat, map, segs, nsegs, size, flags));
}
483:
484: void
485: iommu_dvmamap_unload(void *v, bus_dmamap_t map)
486: {
487: struct astro_softc *sc = v;
488: struct iommu_map_state *ims = map->_dm_cookie;
489: struct iommu_page_map *ipm = &ims->ims_map;
490: struct iommu_page_entry *e;
491: int err, i, s;
492:
493: /* Remove the IOMMU entries. */
494: for (i = 0, e = ipm->ipm_map; i < ipm->ipm_pagecnt; ++i, ++e)
495: iommu_remove(sc, e->ipe_dva);
496:
497: /* Clear the iomap. */
498: iommu_iomap_clear_pages(ims);
499:
500: bus_dmamap_unload(sc->sc_dmat, map);
501:
502: s = splhigh();
503: err = extent_free(sc->sc_dvmamap, ims->ims_dvmastart,
504: ims->ims_dvmasize, EX_NOWAIT);
505: ims->ims_dvmastart = 0;
506: ims->ims_dvmasize = 0;
507: splx(s);
508: if (err)
509: printf("warning: %ld of DVMA space lost\n", ims->ims_dvmasize);
510: }
511:
512: void
513: iommu_dvmamap_sync(void *v, bus_dmamap_t map, bus_addr_t off,
514: bus_size_t len, int ops)
515: {
516: /* Nothing to do; DMA is cache-coherent. */
517: }
518:
519: int
520: iommu_dvmamem_alloc(void *v, bus_size_t size, bus_size_t alignment,
521: bus_size_t boundary, bus_dma_segment_t *segs,
522: int nsegs, int *rsegs, int flags)
523: {
524: struct astro_softc *sc = v;
525:
526: return (bus_dmamem_alloc(sc->sc_dmat, size, alignment, boundary,
527: segs, nsegs, rsegs, flags));
528: }
529:
530: void
531: iommu_dvmamem_free(void *v, bus_dma_segment_t *segs, int nsegs)
532: {
533: struct astro_softc *sc = v;
534:
535: bus_dmamem_free(sc->sc_dmat, segs, nsegs);
536: }
537:
538: int
539: iommu_dvmamem_map(void *v, bus_dma_segment_t *segs, int nsegs, size_t size,
540: caddr_t *kvap, int flags)
541: {
542: struct astro_softc *sc = v;
543:
544: return (bus_dmamem_map(sc->sc_dmat, segs, nsegs, size, kvap, flags));
545: }
546:
547: void
548: iommu_dvmamem_unmap(void *v, caddr_t kva, size_t size)
549: {
550: struct astro_softc *sc = v;
551:
552: bus_dmamem_unmap(sc->sc_dmat, kva, size);
553: }
554:
555: paddr_t
556: iommu_dvmamem_mmap(void *v, bus_dma_segment_t *segs, int nsegs, off_t off,
557: int prot, int flags)
558: {
559: struct astro_softc *sc = v;
560:
561: return (bus_dmamem_mmap(sc->sc_dmat, segs, nsegs, off, prot, flags));
562: }
563:
564: /*
565: * Utility function used by splay tree to order page entries by pa.
566: */
567: static inline int
568: iomap_compare(struct iommu_page_entry *a, struct iommu_page_entry *b)
569: {
570: return ((a->ipe_pa > b->ipe_pa) ? 1 :
571: (a->ipe_pa < b->ipe_pa) ? -1 : 0);
572: }
573:
574: SPLAY_PROTOTYPE(iommu_page_tree, iommu_page_entry, ipe_node, iomap_compare);
575:
576: SPLAY_GENERATE(iommu_page_tree, iommu_page_entry, ipe_node, iomap_compare);
577:
578: /*
579: * Create a new iomap.
580: */
581: struct iommu_map_state *
582: iommu_iomap_create(int n)
583: {
584: struct iommu_map_state *ims;
585:
586: /* Safety for heavily fragmented data, such as mbufs */
587: n += 4;
588: if (n < 16)
589: n = 16;
590:
591: ims = malloc(sizeof(*ims) + (n - 1) * sizeof(ims->ims_map.ipm_map[0]),
592: M_DEVBUF, M_NOWAIT);
593: if (ims == NULL)
594: return (NULL);
595:
596: memset(ims, 0, sizeof *ims);
597:
598: /* Initialize the map. */
599: ims->ims_map.ipm_maxpage = n;
600: SPLAY_INIT(&ims->ims_map.ipm_tree);
601:
602: return (ims);
603: }
604:
605: /*
606: * Destroy an iomap.
607: */
608: void
609: iommu_iomap_destroy(struct iommu_map_state *ims)
610: {
611: #ifdef DIAGNOSTIC
612: if (ims->ims_map.ipm_pagecnt > 0)
613: printf("iommu_iomap_destroy: %d page entries in use\n",
614: ims->ims_map.ipm_pagecnt);
615: #endif
616:
617: free(ims, M_DEVBUF);
618: }
619:
620: /*
621: * Insert a pa entry in the iomap.
622: */
623: int
624: iommu_iomap_insert_page(struct iommu_map_state *ims, vaddr_t va, paddr_t pa)
625: {
626: struct iommu_page_map *ipm = &ims->ims_map;
627: struct iommu_page_entry *e;
628:
629: if (ipm->ipm_pagecnt >= ipm->ipm_maxpage) {
630: struct iommu_page_entry ipe;
631:
632: ipe.ipe_pa = pa;
633: if (SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &ipe))
634: return (0);
635:
636: return (ENOMEM);
637: }
638:
639: e = &ipm->ipm_map[ipm->ipm_pagecnt];
640:
641: e->ipe_pa = pa;
642: e->ipe_va = va;
643: e->ipe_dva = NULL;
644:
645: e = SPLAY_INSERT(iommu_page_tree, &ipm->ipm_tree, e);
646:
647: /* Duplicates are okay, but only count them once. */
648: if (e)
649: return (0);
650:
651: ++ipm->ipm_pagecnt;
652:
653: return (0);
654: }
655:
656: /*
657: * Translate a physical address (pa) into a DVMA address.
658: */
659: bus_addr_t
660: iommu_iomap_translate(struct iommu_map_state *ims, paddr_t pa)
661: {
662: struct iommu_page_map *ipm = &ims->ims_map;
663: struct iommu_page_entry *e;
664: struct iommu_page_entry pe;
665: paddr_t offset = pa & PAGE_MASK;
666:
667: pe.ipe_pa = trunc_page(pa);
668:
669: e = SPLAY_FIND(iommu_page_tree, &ipm->ipm_tree, &pe);
670:
671: if (e == NULL) {
672: panic("couldn't find pa %lx\n", pa);
673: return 0;
674: }
675:
676: return (e->ipe_dva | offset);
677: }
678:
679: /*
680: * Clear the iomap table and tree.
681: */
682: void
683: iommu_iomap_clear_pages(struct iommu_map_state *ims)
684: {
685: ims->ims_map.ipm_pagecnt = 0;
686: SPLAY_INIT(&ims->ims_map.ipm_tree);
687: }
688:
/*
 * Add an entry to the IOMMU table.
 *
 * Builds the I/O TLB translation entry for one page from its physical
 * frame and the cache coherence index of its kernel virtual alias,
 * then flushes the entry's cache line so the chip observes it.
 */
void
iommu_enter(struct astro_softc *sc, bus_addr_t dva, paddr_t pa, vaddr_t va,
    int flags)
{
	volatile u_int64_t *tte_ptr = &sc->sc_pdir[dva >> PAGE_SHIFT];
	u_int64_t tte;
	u_int32_t ci;

#ifdef DEBUG
	printf("iommu_enter dva %lx, pa %lx, va %lx\n", dva, pa, va);
#endif

#ifdef DIAGNOSTIC
	/* Entries must be invalid before being (re)written. */
	tte = letoh64(*tte_ptr);

	if (tte & IOTTE_V) {
		printf("Overwriting valid tte entry (dva %lx pa %lx "
		    "&tte %p tte %llx)\n", dva, pa, tte_ptr, tte);
		extent_print(sc->sc_dvmamap);
		panic("IOMMU overwrite");
	}
#endif

	/* Load the kernel space id into %sr1 for the lci/fdc below. */
	mtsp(HPPA_SID_KERNEL, 1);
	/* Fetch the coherence index for the page at va. */
	__asm volatile("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (va));

	/* Physical frame bits plus coherence index field. */
	tte = (pa & IOTTE_PAMASK) | ((ci >> 12) & IOTTE_CI);
	tte |= IOTTE_V;

	*tte_ptr = htole64(tte);
	/* Flush the tte to memory so the IOMMU sees the update. */
	__asm volatile("fdc 0(%%sr1, %0)\n\tsync" : : "r" (tte_ptr));
}
724:
/*
 * Remove an entry from the IOMMU table.
 *
 * Clears the valid bit of the translation entry and purges the page
 * from the chip's I/O TLB.
 */
void
iommu_remove(struct astro_softc *sc, bus_addr_t dva)
{
	volatile struct astro_regs *r = sc->sc_regs;
	u_int64_t *tte_ptr = &sc->sc_pdir[dva >> PAGE_SHIFT];
	u_int64_t tte;

#ifdef DIAGNOSTIC
	if (dva != trunc_page(dva)) {
		printf("iommu_remove: unaligned dva: %lx\n", dva);
		dva = trunc_page(dva);
	}
#endif

	tte = letoh64(*tte_ptr);

#ifdef DIAGNOSTIC
	/* Only valid entries should ever be removed. */
	if ((tte & IOTTE_V) == 0) {
		printf("Removing invalid tte entry (dva %lx &tte %p "
		    "tte %llx)\n", dva, tte_ptr, tte);
		extent_print(sc->sc_dvmamap);
		panic("IOMMU remove overwrite");
	}
#endif

	*tte_ptr = htole64(tte & ~IOTTE_V);

	/*
	 * Flush IOMMU.  The low bits of the pcom command apparently
	 * encode log2 of the purge size, so PAGE_SHIFT purges one page
	 * (cf. the full flush with 31 in astro_attach()) — confirm
	 * against chip documentation.
	 */
	r->tlb_pcom = htole32(dva | PAGE_SHIFT);
}
CVSweb