Annotation of sys/arch/alpha/pci/cia_dma.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: cia_dma.c,v 1.8 2007/04/18 16:56:34 martin Exp $ */
2: /* $NetBSD: cia_dma.c,v 1.16 2000/06/29 08:58:46 mrg Exp $ */
3:
4: /*-
5: * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
6: * All rights reserved.
7: *
8: * This code is derived from software contributed to The NetBSD Foundation
9: * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10: * NASA Ames Research Center.
11: *
12: * Redistribution and use in source and binary forms, with or without
13: * modification, are permitted provided that the following conditions
14: * are met:
15: * 1. Redistributions of source code must retain the above copyright
16: * notice, this list of conditions and the following disclaimer.
17: * 2. Redistributions in binary form must reproduce the above copyright
18: * notice, this list of conditions and the following disclaimer in the
19: * documentation and/or other materials provided with the distribution.
20: * 3. All advertising materials mentioning features or use of this software
21: * must display the following acknowledgement:
22: * This product includes software developed by the NetBSD
23: * Foundation, Inc. and its contributors.
24: * 4. Neither the name of The NetBSD Foundation nor the names of its
25: * contributors may be used to endorse or promote products derived
26: * from this software without specific prior written permission.
27: *
28: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
29: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
30: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
31: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
32: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38: * POSSIBILITY OF SUCH DAMAGE.
39: */
40:
41: /*
42: * XXX - We should define this before including bus.h, but since other stuff
43: * pulls in bus.h we must do this here.
44: */
45: #define _ALPHA_BUS_DMA_PRIVATE
46:
47: #include <sys/param.h>
48: #include <sys/systm.h>
49: #include <sys/kernel.h>
50: #include <sys/device.h>
51: #include <sys/malloc.h>
52:
53: #include <uvm/uvm_extern.h>
54:
55: #include <machine/bus.h>
56:
57: #include <dev/pci/pcireg.h>
58: #include <dev/pci/pcivar.h>
59: #include <alpha/pci/ciareg.h>
60: #include <alpha/pci/ciavar.h>
61:
62: bus_dma_tag_t cia_dma_get_tag(bus_dma_tag_t, alpha_bus_t);
63:
64: int cia_bus_dmamap_create_direct(bus_dma_tag_t, bus_size_t, int,
65: bus_size_t, bus_size_t, int, bus_dmamap_t *);
66:
67: int cia_bus_dmamap_load_sgmap(bus_dma_tag_t, bus_dmamap_t, void *,
68: bus_size_t, struct proc *, int);
69:
70: int cia_bus_dmamap_load_mbuf_sgmap(bus_dma_tag_t, bus_dmamap_t,
71: struct mbuf *, int);
72:
73: int cia_bus_dmamap_load_uio_sgmap(bus_dma_tag_t, bus_dmamap_t,
74: struct uio *, int);
75:
76: int cia_bus_dmamap_load_raw_sgmap(bus_dma_tag_t, bus_dmamap_t,
77: bus_dma_segment_t *, int, bus_size_t, int);
78:
79: void cia_bus_dmamap_unload_sgmap(bus_dma_tag_t, bus_dmamap_t);
80:
81: /*
82: * Direct-mapped window: 1G at 1G
83: */
84: #define CIA_DIRECT_MAPPED_BASE (1*1024*1024*1024)
85: #define CIA_DIRECT_MAPPED_SIZE (1*1024*1024*1024)
86:
87: /*
88: * SGMAP window: 8M at 8M
89: */
90: #define CIA_SGMAP_MAPPED_BASE (8*1024*1024)
91: #define CIA_SGMAP_MAPPED_SIZE (8*1024*1024)
92:
93: /* ALCOR/ALGOR2/PYXIS have a 256-byte out-bound DMA prefetch threshold. */
94: #define CIA_SGMAP_PFTHRESH 256
95:
96: void cia_tlb_invalidate(void);
97: void cia_broken_pyxis_tlb_invalidate(void);
98:
99: void (*cia_tlb_invalidate_fn)(void);
100:
101: #define CIA_TLB_INVALIDATE() (*cia_tlb_invalidate_fn)()
102:
103: struct alpha_sgmap cia_pyxis_bug_sgmap;
104: #define CIA_PYXIS_BUG_BASE (128*1024*1024)
105: #define CIA_PYXIS_BUG_SIZE (2*1024*1024)
106:
/*
 * cia_dma_init:
 *
 *	Set up DMA for a CIA/Pyxis core logic chipset: build the
 *	direct-mapped and SGMAP-mapped DMA tags, program PCI window 0
 *	as an 8MB SGMAP window, and install the appropriate S/G TLB
 *	invalidation routine (including the workaround for the broken
 *	TLB on Pyxis pass 1/2).
 *
 *	ccp: softc-level configuration for this CIA instance; its
 *	cc_dmat_direct, cc_dmat_sgmap, and cc_sgmap members are
 *	initialized here.
 */
void
cia_dma_init(ccp)
	struct cia_config *ccp;
{
	bus_addr_t tbase;
	bus_dma_tag_t t;

	/*
	 * Initialize the DMA tag used for direct-mapped DMA.
	 * It falls back to the SGMAP tag (via _next_window) for
	 * addresses the 1G window cannot reach.
	 */
	t = &ccp->cc_dmat_direct;
	t->_cookie = ccp;
	t->_wbase = CIA_DIRECT_MAPPED_BASE;
	t->_wsize = CIA_DIRECT_MAPPED_SIZE;
	t->_next_window = &ccp->cc_dmat_sgmap;
	t->_boundary = 0;
	t->_sgmap = NULL;
	t->_get_tag = cia_dma_get_tag;
	/* Custom map-create so the Pyxis coalescing bug can be avoided. */
	t->_dmamap_create = cia_bus_dmamap_create_direct;
	t->_dmamap_destroy = _bus_dmamap_destroy;
	t->_dmamap_load = _bus_dmamap_load_direct;
	t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
	t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
	t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
	t->_dmamap_unload = _bus_dmamap_unload;
	t->_dmamap_sync = _bus_dmamap_sync;

	t->_dmamem_alloc = _bus_dmamem_alloc;
	t->_dmamem_free = _bus_dmamem_free;
	t->_dmamem_map = _bus_dmamem_map;
	t->_dmamem_unmap = _bus_dmamem_unmap;
	t->_dmamem_mmap = _bus_dmamem_mmap;

	/*
	 * Initialize the DMA tag used for sgmap-mapped DMA.
	 * This is the last window in the chain (_next_window == NULL).
	 */
	t = &ccp->cc_dmat_sgmap;
	t->_cookie = ccp;
	t->_wbase = CIA_SGMAP_MAPPED_BASE;
	t->_wsize = CIA_SGMAP_MAPPED_SIZE;
	t->_next_window = NULL;
	t->_boundary = 0;
	t->_sgmap = &ccp->cc_sgmap;
	/* 256-byte out-bound prefetch threshold; see CIA_SGMAP_PFTHRESH. */
	t->_pfthresh = CIA_SGMAP_PFTHRESH;
	t->_get_tag = cia_dma_get_tag;
	t->_dmamap_create = alpha_sgmap_dmamap_create;
	t->_dmamap_destroy = alpha_sgmap_dmamap_destroy;
	t->_dmamap_load = cia_bus_dmamap_load_sgmap;
	t->_dmamap_load_mbuf = cia_bus_dmamap_load_mbuf_sgmap;
	t->_dmamap_load_uio = cia_bus_dmamap_load_uio_sgmap;
	t->_dmamap_load_raw = cia_bus_dmamap_load_raw_sgmap;
	t->_dmamap_unload = cia_bus_dmamap_unload_sgmap;
	t->_dmamap_sync = _bus_dmamap_sync;

	t->_dmamem_alloc = _bus_dmamem_alloc;
	t->_dmamem_free = _bus_dmamem_free;
	t->_dmamem_map = _bus_dmamem_map;
	t->_dmamem_unmap = _bus_dmamem_unmap;
	t->_dmamem_mmap = _bus_dmamem_mmap;

	/*
	 * The firmware has set up window 1 as a 1G direct-mapped DMA
	 * window beginning at 1G.  We leave it alone.  Leave window
	 * 0 alone until we reconfigure it for SGMAP-mapped DMA.
	 * Windows 2 and 3 are already disabled.
	 */

	/*
	 * Initialize the SGMAP.  Must align page table to 32k
	 * (hardware bug?).
	 */
	alpha_sgmap_init(t, &ccp->cc_sgmap, "cia_sgmap",
	    CIA_SGMAP_MAPPED_BASE, 0, CIA_SGMAP_MAPPED_SIZE,
	    sizeof(u_int64_t), NULL, (32*1024));

	/*
	 * Set up window 0 as an 8MB SGMAP-mapped window
	 * starting at 8MB.  Each register write is followed by a
	 * memory barrier so the hardware sees them in program order.
	 */
	REGVAL(CIA_PCI_W0BASE) = CIA_SGMAP_MAPPED_BASE |
	    CIA_PCI_WnBASE_SG_EN | CIA_PCI_WnBASE_W_EN;
	alpha_mb();

	REGVAL(CIA_PCI_W0MASK) = CIA_PCI_WnMASK_8M;
	alpha_mb();

	/* Point the window's translated-base register at the page table. */
	tbase = ccp->cc_sgmap.aps_ptpa >> CIA_PCI_TnBASE_SHIFT;
	if ((tbase & CIA_PCI_TnBASE_MASK) != tbase)
		panic("cia_dma_init: bad page table address");
	REGVAL(CIA_PCI_T0BASE) = tbase;
	alpha_mb();

	/*
	 * Pass 1 and 2 (i.e. revision <= 1) of the Pyxis have a
	 * broken scatter/gather TLB; it cannot be invalidated.  To
	 * work around this problem, we configure window 2 as an SG
	 * 2M window at 128M, which we use in DMA loopback mode to
	 * read a spill page.  This works by causing TLB misses,
	 * causing the old entries to be purged to make room for
	 * the new entries coming in for the spill page.
	 */
	if ((ccp->cc_flags & CCF_ISPYXIS) != 0 && ccp->cc_rev <= 1) {
		u_int64_t *page_table;
		int i;

		cia_tlb_invalidate_fn =
		    cia_broken_pyxis_tlb_invalidate;

		alpha_sgmap_init(t, &cia_pyxis_bug_sgmap,
		    "pyxis_bug_sgmap", CIA_PYXIS_BUG_BASE, 0,
		    CIA_PYXIS_BUG_SIZE, sizeof(u_int64_t), NULL,
		    (32*1024));

		REGVAL(CIA_PCI_W2BASE) = CIA_PYXIS_BUG_BASE |
		    CIA_PCI_WnBASE_SG_EN | CIA_PCI_WnBASE_W_EN;
		alpha_mb();

		REGVAL(CIA_PCI_W2MASK) = CIA_PCI_WnMASK_2M;
		alpha_mb();

		tbase = cia_pyxis_bug_sgmap.aps_ptpa >>
		    CIA_PCI_TnBASE_SHIFT;
		if ((tbase & CIA_PCI_TnBASE_MASK) != tbase)
			panic("cia_dma_init: bad page table address");
		REGVAL(CIA_PCI_T2BASE) = tbase;
		alpha_mb();

		/*
		 * Initialize the page table to point at the spill
		 * page.  Leave the last entry invalid.
		 */
		pci_sgmap_pte64_init_spill_page_pte();
		for (i = 0, page_table = cia_pyxis_bug_sgmap.aps_pt;
		     i < (CIA_PYXIS_BUG_SIZE / PAGE_SIZE) - 1; i++) {
			page_table[i] =
			    pci_sgmap_pte64_prefetch_spill_page_pte;
		}
		alpha_mb();
	} else
		cia_tlb_invalidate_fn = cia_tlb_invalidate;

	/* Start with a clean S/G TLB. */
	CIA_TLB_INVALIDATE();

	/* XXX XXX BEGIN XXX XXX */
	{							/* XXX */
		extern paddr_t alpha_XXX_dmamap_or;		/* XXX */
		alpha_XXX_dmamap_or = CIA_DIRECT_MAPPED_BASE;	/* XXX */
	}							/* XXX */
	/* XXX XXX END XXX XXX */
}
257:
258: /*
259: * Return the bus dma tag to be used for the specified bus type.
260: * INTERNAL USE ONLY!
261: */
262: bus_dma_tag_t
263: cia_dma_get_tag(t, bustype)
264: bus_dma_tag_t t;
265: alpha_bus_t bustype;
266: {
267: struct cia_config *ccp = t->_cookie;
268:
269: switch (bustype) {
270: case ALPHA_BUS_PCI:
271: case ALPHA_BUS_EISA:
272: /*
273: * Systems with a CIA can only support 1G
274: * of memory, so we use the direct-mapped window
275: * on busses that have 32-bit DMA.
276: *
277: * Ahem: I have a PWS 500au with 1.5G of memory, and it
278: * had problems doing DMA because it was not falling back
279: * to using SGMAPs. I've fixed that and my PWS now works with
280: * 1.5G. There have been other reports about failures with
281: * more than 1.0G of memory. Michael Hitch
282: */
283: return (&ccp->cc_dmat_direct);
284:
285: case ALPHA_BUS_ISA:
286: /*
287: * ISA doesn't have enough address bits to use
288: * the direct-mapped DMA window, so we must use
289: * SGMAPs.
290: */
291: return (&ccp->cc_dmat_sgmap);
292:
293: default:
294: panic("cia_dma_get_tag: shouldn't be here, really...");
295: }
296: }
297:
298: /*
299: * Create a CIA direct-mapped DMA map.
300: */
301: int
302: cia_bus_dmamap_create_direct(t, size, nsegments, maxsegsz, boundary,
303: flags, dmamp)
304: bus_dma_tag_t t;
305: bus_size_t size;
306: int nsegments;
307: bus_size_t maxsegsz;
308: bus_size_t boundary;
309: int flags;
310: bus_dmamap_t *dmamp;
311: {
312: struct cia_config *ccp = t->_cookie;
313: bus_dmamap_t map;
314: int error;
315:
316: error = _bus_dmamap_create(t, size, nsegments, maxsegsz,
317: boundary, flags, dmamp);
318: if (error)
319: return (error);
320:
321: map = *dmamp;
322:
323: if ((ccp->cc_flags & CCF_PYXISBUG) != 0 &&
324: map->_dm_segcnt > 1) {
325: /*
326: * We have a Pyxis with the DMA page crossing bug, make
327: * sure we don't coalesce adjacent DMA segments.
328: *
329: * NOTE: We can only do this if the max segment count
330: * is greater than 1. This is because many network
331: * drivers allocate large contiguous blocks of memory
332: * for control data structures, even though they won't
333: * do any single DMA that crosses a page boundary.
334: * -- thorpej@netbsd.org, 2/5/2000
335: */
336: map->_dm_flags |= DMAMAP_NO_COALESCE;
337: }
338:
339: return (0);
340: }
341:
342: /*
343: * Load a CIA SGMAP-mapped DMA map with a linear buffer.
344: */
345: int
346: cia_bus_dmamap_load_sgmap(t, map, buf, buflen, p, flags)
347: bus_dma_tag_t t;
348: bus_dmamap_t map;
349: void *buf;
350: bus_size_t buflen;
351: struct proc *p;
352: int flags;
353: {
354: int error;
355:
356: error = pci_sgmap_pte64_load(t, map, buf, buflen, p, flags,
357: t->_sgmap);
358: if (error == 0)
359: CIA_TLB_INVALIDATE();
360:
361: return (error);
362: }
363:
364: /*
365: * Load a CIA SGMAP-mapped DMA map with an mbuf chain.
366: */
367: int
368: cia_bus_dmamap_load_mbuf_sgmap(t, map, m, flags)
369: bus_dma_tag_t t;
370: bus_dmamap_t map;
371: struct mbuf *m;
372: int flags;
373: {
374: int error;
375:
376: error = pci_sgmap_pte64_load_mbuf(t, map, m, flags, t->_sgmap);
377: if (error == 0)
378: CIA_TLB_INVALIDATE();
379:
380: return (error);
381: }
382:
383: /*
384: * Load a CIA SGMAP-mapped DMA map with a uio.
385: */
386: int
387: cia_bus_dmamap_load_uio_sgmap(t, map, uio, flags)
388: bus_dma_tag_t t;
389: bus_dmamap_t map;
390: struct uio *uio;
391: int flags;
392: {
393: int error;
394:
395: error = pci_sgmap_pte64_load_uio(t, map, uio, flags, t->_sgmap);
396: if (error == 0)
397: CIA_TLB_INVALIDATE();
398:
399: return (error);
400: }
401:
402: /*
403: * Load a CIA SGMAP-mapped DMA map with raw memory.
404: */
405: int
406: cia_bus_dmamap_load_raw_sgmap(t, map, segs, nsegs, size, flags)
407: bus_dma_tag_t t;
408: bus_dmamap_t map;
409: bus_dma_segment_t *segs;
410: int nsegs;
411: bus_size_t size;
412: int flags;
413: {
414: int error;
415:
416: error = pci_sgmap_pte64_load_raw(t, map, segs, nsegs, size, flags,
417: t->_sgmap);
418: if (error == 0)
419: CIA_TLB_INVALIDATE();
420:
421: return (error);
422: }
423:
424: /*
425: * Unload a CIA DMA map.
426: */
427: void
428: cia_bus_dmamap_unload_sgmap(t, map)
429: bus_dma_tag_t t;
430: bus_dmamap_t map;
431: {
432:
433: /*
434: * Invalidate any SGMAP page table entries used by this
435: * mapping.
436: */
437: pci_sgmap_pte64_unload(t, map, t->_sgmap);
438: CIA_TLB_INVALIDATE();
439:
440: /*
441: * Do the generic bits of the unload.
442: */
443: _bus_dmamap_unload(t, map);
444: }
445:
/*
 * cia_tlb_invalidate:
 *
 *	Flush the CIA scatter/gather TLB by writing the "invalidate
 *	all" command to the TBIA register.  The barriers ensure prior
 *	PTE updates are visible before the flush, and that the flush
 *	write itself is posted before we return.
 */
void
cia_tlb_invalidate()
{

	alpha_mb();
	REGVAL(CIA_PCI_TBIA) = CIA_PCI_TBIA_ALL;
	alpha_mb();
}
457:
/*
 * cia_broken_pyxis_tlb_invalidate:
 *
 *	Flush the scatter/gather TLB on broken Pyxis chips (pass 1/2),
 *	whose TLB cannot be invalidated directly.  Instead, we force
 *	the TLB to evict its entries by performing DMA-loopback reads
 *	through the 2M "bug" SG window set up in cia_dma_init(),
 *	causing misses that purge the stale entries.  Runs at splhigh
 *	since PCI loopback mode must not be interrupted.
 */
void
cia_broken_pyxis_tlb_invalidate()
{
	volatile u_int64_t dummy;
	u_int32_t ctrl;
	int i, s;

	s = splhigh();

	/*
	 * Put the Pyxis into PCI loopback mode so our reads of PCI
	 * dense space are looped back as DMA through the SG window.
	 */
	alpha_mb();
	ctrl = REGVAL(CIA_CSR_CTRL);
	REGVAL(CIA_CSR_CTRL) = ctrl | CTRL_PCI_LOOP_EN;
	alpha_mb();

	/*
	 * Now, read from PCI dense memory space at offset 128M (our
	 * target window base), skipping 64k on each read.  This forces
	 * S/G TLB misses.
	 *
	 * XXX Looks like the TLB entries are `not quite LRU'.  We need
	 * XXX to read more times than there are actual tags!
	 */
	for (i = 0; i < CIA_TLB_NTAGS + 4; i++) {
		dummy = *((volatile u_int64_t *)
		    ALPHA_PHYS_TO_K0SEG(CIA_PCI_DENSE + CIA_PYXIS_BUG_BASE +
		    (i * 65536)));
	}

	/*
	 * Restore normal PCI operation (clear loopback mode).
	 */
	alpha_mb();
	REGVAL(CIA_CSR_CTRL) = ctrl;
	alpha_mb();

	splx(s);
}
CVSweb