/*	$OpenBSD: bus_dma.c,v 1.21 2006/05/21 02:11:54 brad Exp $	*/
/*	$NetBSD: bus_dma.c,v 1.40 2000/07/17 04:47:56 thorpej Exp $	*/

/*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _ALPHA_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mbuf.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

int _bus_dmamap_load_buffer_direct(bus_dma_tag_t,
    bus_dmamap_t, void *, bus_size_t, struct proc *, int,
    paddr_t *, int *, int);

extern paddr_t avail_start, avail_end;	/* from pmap.c */

/*
 * Common function for DMA map creation. May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
        bus_dma_tag_t t;
        bus_size_t size;
        int nsegments;
        bus_size_t maxsegsz;
        bus_size_t boundary;
        int flags;
        bus_dmamap_t *dmamp;
{
        struct alpha_bus_dmamap *map;
        void *mapstore;
        size_t mapsize;

        /*
         * Allocate and initialize the DMA map. The end of the map
         * is a variable-sized array of segments, so we allocate enough
         * room for them in one shot.
         *
         * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
         * of ALLOCNOW notifies others that we've reserved these resources,
         * and they are not to be freed.
         *
         * The bus_dmamap_t includes one bus_dma_segment_t, hence
         * the (nsegments - 1).
         */
        mapsize = sizeof(struct alpha_bus_dmamap) +
            (sizeof(bus_dma_segment_t) * (nsegments - 1));
        if ((mapstore = malloc(mapsize, M_DEVBUF,
            (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
                return (ENOMEM);

        bzero(mapstore, mapsize);
        map = (struct alpha_bus_dmamap *)mapstore;
        map->_dm_size = size;
        map->_dm_segcnt = nsegments;
        map->_dm_maxsegsz = maxsegsz;
        if (t->_boundary != 0 && t->_boundary < boundary)
                map->_dm_boundary = t->_boundary;
        else
                map->_dm_boundary = boundary;
        map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
        map->dm_mapsize = 0;	/* no valid mappings */
        map->dm_nsegs = 0;
        map->_dm_window = NULL;

        *dmamp = map;
        return (0);
}

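/*
 * Illustrative sketch (not part of the original file): through the
 * machine-independent interface, a driver pairs the function above with
 * _bus_dmamap_destroy() via bus_dmamap_create()/bus_dmamap_destroy().
 * The softc "sc" and its members below are hypothetical.
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MAXBSIZE, 1, MAXBSIZE, 0,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_dmamap) != 0)
 *		return (ENOMEM);
 *	...
 *	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
 */
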
/*
 * Common function for DMA map destruction. May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(t, map)
        bus_dma_tag_t t;
        bus_dmamap_t map;
{

        free(map, M_DEVBUF);
}

/*
 * Utility function to load a linear buffer. lastaddrp holds state
 * between invocations (for multiple-buffer loads). segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer_direct(t, map, buf, buflen, p, flags,
    lastaddrp, segp, first)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        void *buf;
        bus_size_t buflen;
        struct proc *p;
        int flags;
        paddr_t *lastaddrp;
        int *segp;
        int first;
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vaddr_t vaddr = (vaddr_t)buf;
        int seg;

        lastaddr = *lastaddrp;
        bmask = ~(map->_dm_boundary - 1);

        for (seg = *segp; buflen > 0 ; ) {
                /*
                 * Get the physical address for this segment.
                 */
                if (p != NULL)
                        pmap_extract(p->p_vmspace->vm_map.pmap, vaddr,
                            &curaddr);
                else
                        curaddr = vtophys(vaddr);

                /*
                 * If we're beyond the current DMA window, indicate
                 * that and try to fall back into SGMAPs.
                 */
                if (t->_wsize != 0 && curaddr >= t->_wsize)
                        return (EINVAL);

                curaddr |= t->_wbase;

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
                if (buflen < sgsize)
                        sgsize = buflen;
                if (map->_dm_maxsegsz < sgsize)
                        sgsize = map->_dm_maxsegsz;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (map->_dm_boundary > 0) {
                        baddr = (curaddr + map->_dm_boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }
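
                /*
                 * Illustrative note (not in the original source): with a
                 * 64KB boundary, bmask is ~0xffff. If curaddr is 0x1f000,
                 * baddr becomes (0x1f000 + 0x10000) & ~0xffff = 0x20000,
                 * so sgsize is clipped to at most 0x1000 and the segment
                 * stops at the boundary instead of crossing it.
                 */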

                /*
                 * Insert chunk into a segment, coalescing with
                 * the previous segment if possible.
                 */
                if (first) {
                        map->dm_segs[seg].ds_addr = curaddr;
                        map->dm_segs[seg].ds_len = sgsize;
                        first = 0;
                } else {
                        if ((map->_dm_flags & DMAMAP_NO_COALESCE) == 0 &&
                            curaddr == lastaddr &&
                            (map->dm_segs[seg].ds_len + sgsize) <=
                             map->_dm_maxsegsz &&
                            (map->_dm_boundary == 0 ||
                             (map->dm_segs[seg].ds_addr & bmask) ==
                             (curaddr & bmask)))
                                map->dm_segs[seg].ds_len += sgsize;
                        else {
                                if (++seg >= map->_dm_segcnt)
                                        break;
                                map->dm_segs[seg].ds_addr = curaddr;
                                map->dm_segs[seg].ds_len = sgsize;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        if (buflen != 0) {
                /*
                 * If there is a chained window, we will automatically
                 * fall back to it.
                 */
                return (EFBIG);	/* XXX better return value here? */
        }

        return (0);
}

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer. Called by bus-specific DMA map load functions with the
 * OR value appropriate for indicating "direct-mapped" for that
 * chipset.
 */
int
_bus_dmamap_load_direct(t, map, buf, buflen, p, flags)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        void *buf;
        bus_size_t buflen;
        struct proc *p;
        int flags;
{
        paddr_t lastaddr;
        int seg, error;

        /*
         * Make sure that on error condition we return "no valid mappings".
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
        KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

        if (buflen > map->_dm_size)
                return (EINVAL);

        seg = 0;
        error = _bus_dmamap_load_buffer_direct(t, map, buf, buflen,
            p, flags, &lastaddr, &seg, 1);
        if (error == 0) {
                map->dm_mapsize = buflen;
                map->dm_nsegs = seg + 1;
                map->_dm_window = t;
        } else if (t->_next_window != NULL) {
                /*
                 * Give the next window a chance.
                 */
                error = bus_dmamap_load(t->_next_window, map, buf, buflen,
                    p, flags);
        }
        return (error);
}

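/*
 * Illustrative sketch (not part of the original file): a driver typically
 * loads a buffer, syncs before starting the transfer, and syncs again and
 * unloads once it completes. "sc", its members and "xfer_len" below are
 * hypothetical.
 *
 *	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_buf,
 *	    xfer_len, NULL, BUS_DMA_NOWAIT) != 0)
 *		return (EAGAIN);
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, xfer_len,
 *	    BUS_DMASYNC_PREREAD);
 *	... start the transfer and wait for completion ...
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, xfer_len,
 *	    BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
 */
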
/*
 * Like _bus_dmamap_load_direct(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf_direct(t, map, m0, flags)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        struct mbuf *m0;
        int flags;
{
        paddr_t lastaddr;
        int seg, error, first;
        struct mbuf *m;

        /*
         * Make sure that on error condition we return "no valid mappings."
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
        KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

#ifdef DIAGNOSTIC
        if ((m0->m_flags & M_PKTHDR) == 0)
                panic("_bus_dmamap_load_mbuf_direct: no packet header");
#endif

        if (m0->m_pkthdr.len > map->_dm_size)
                return (EINVAL);

        first = 1;
        seg = 0;
        error = 0;
        for (m = m0; m != NULL && error == 0; m = m->m_next) {
                if (m->m_len == 0)
                        continue;
                error = _bus_dmamap_load_buffer_direct(t, map,
                    m->m_data, m->m_len, NULL, flags, &lastaddr, &seg, first);
                first = 0;
        }
        if (error == 0) {
                map->dm_mapsize = m0->m_pkthdr.len;
                map->dm_nsegs = seg + 1;
                map->_dm_window = t;
        } else if (t->_next_window != NULL) {
                /*
                 * Give the next window a chance.
                 */
                error = bus_dmamap_load_mbuf(t->_next_window, map, m0, flags);
        }
        return (error);
}

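/*
 * Illustrative sketch (not part of the original file): a network driver's
 * transmit path usually loads the outgoing mbuf chain and syncs the map
 * for a device read of the data. "sc", "txmap" and "m" are hypothetical.
 *
 *	if (bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m, BUS_DMA_NOWAIT) != 0)
 *		return (ENOBUFS);
 *	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
 *	    BUS_DMASYNC_PREWRITE);
 *	... hand txmap->dm_segs[0 .. txmap->dm_nsegs - 1] to the hardware ...
 */
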
/*
 * Like _bus_dmamap_load_direct(), but for uios.
 */
int
_bus_dmamap_load_uio_direct(t, map, uio, flags)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        struct uio *uio;
        int flags;
{
        paddr_t lastaddr;
        int seg, i, error, first;
        bus_size_t minlen, resid;
        struct proc *p = NULL;
        struct iovec *iov;
        caddr_t addr;

        /*
         * Make sure that on error condition we return "no valid mappings."
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
        KASSERT((map->_dm_flags & (BUS_DMA_READ|BUS_DMA_WRITE)) == 0);

        resid = uio->uio_resid;
        iov = uio->uio_iov;

        if (uio->uio_segflg == UIO_USERSPACE) {
                p = uio->uio_procp;
#ifdef DIAGNOSTIC
                if (p == NULL)
                        panic("_bus_dmamap_load_uio_direct: "
                            "USERSPACE but no proc");
#endif
        }

        first = 1;
        seg = 0;
        error = 0;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
                /*
                 * Now at the first iovec to load. Load each iovec
                 * until we have exhausted the residual count.
                 */
                minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
                addr = (caddr_t)iov[i].iov_base;

                error = _bus_dmamap_load_buffer_direct(t, map,
                    addr, minlen, p, flags, &lastaddr, &seg, first);
                first = 0;

                resid -= minlen;
        }
        if (error == 0) {
                map->dm_mapsize = uio->uio_resid;
                map->dm_nsegs = seg + 1;
                map->_dm_window = t;
        } else if (t->_next_window != NULL) {
                /*
                 * Give the next window a chance.
                 */
                error = bus_dmamap_load_uio(t->_next_window, map, uio, flags);
        }
        return (error);
}

/*
 * Like _bus_dmamap_load_direct(), but for raw memory.
 */
int
_bus_dmamap_load_raw_direct(t, map, segs, nsegs, size, flags)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        bus_dma_segment_t *segs;
        int nsegs;
        bus_size_t size;
        int flags;
{

        panic("_bus_dmamap_load_raw_direct: not implemented");
}

/*
 * Common function for unloading a DMA map. May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(t, map)
        bus_dma_tag_t t;
        bus_dmamap_t map;
{

        /*
         * No resources to free; just mark the mappings as
         * invalid.
         */
        map->dm_mapsize = 0;
        map->dm_nsegs = 0;
        map->_dm_window = NULL;
        map->_dm_flags &= ~(BUS_DMA_READ|BUS_DMA_WRITE);
}

/*
 * Common function for DMA map synchronization. May be called
 * by chipset-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(t, map, offset, len, op)
        bus_dma_tag_t t;
        bus_dmamap_t map;
        bus_addr_t offset;
        bus_size_t len;
        int op;
{

        /*
         * Flush the store buffer.
         */
        alpha_mb();
}

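/*
 * Illustrative note (not in the original source): the direct-mapped
 * implementation above treats every BUS_DMASYNC_* operation the same way,
 * issuing only a memory barrier so pending CPU stores are ordered with
 * respect to the device's accesses. A hypothetical driver call such as
 *
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
 *	    sc->sc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
 *
 * ("sc" is not from this file) therefore costs a single alpha_mb().
 */
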
/*
 * Common function for DMA-safe memory allocation. May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
        bus_dma_tag_t t;
        bus_size_t size, alignment, boundary;
        bus_dma_segment_t *segs;
        int nsegs;
        int *rsegs;
        int flags;
{

        return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
            segs, nsegs, rsegs, flags, 0, trunc_page(avail_end)));
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs,
    flags, low, high)
        bus_dma_tag_t t;
        bus_size_t size, alignment, boundary;
        bus_dma_segment_t *segs;
        int nsegs;
        int *rsegs;
        int flags;
        paddr_t low;
        paddr_t high;
{
        paddr_t curaddr, lastaddr;
        struct vm_page *m;
        struct pglist mlist;
        int curseg, error;

        /* Always round the size. */
        size = round_page(size);

        /*
         * Allocate pages from the VM system.
         */
        TAILQ_INIT(&mlist);
        error = uvm_pglistalloc(size, low, high, alignment, boundary,
            &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
        if (error)
                return (error);

        /*
         * Compute the location, size, and number of segments actually
         * returned by the VM code.
         */
        m = TAILQ_FIRST(&mlist);
        curseg = 0;
        lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
        segs[curseg].ds_len = PAGE_SIZE;
        m = TAILQ_NEXT(m, pageq);

        for (; m != TAILQ_END(&mlist); m = TAILQ_NEXT(m, pageq)) {
                curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
                if (curaddr < avail_start || curaddr >= high) {
                        printf("uvm_pglistalloc returned non-sensical"
                            " address 0x%lx\n", curaddr);
                        panic("_bus_dmamem_alloc");
                }
#endif
                if (curaddr == (lastaddr + PAGE_SIZE))
                        segs[curseg].ds_len += PAGE_SIZE;
                else {
                        curseg++;
                        segs[curseg].ds_addr = curaddr;
                        segs[curseg].ds_len = PAGE_SIZE;
                }
                lastaddr = curaddr;
        }

        *rsegs = curseg + 1;

        return (0);
}

/*
 * Common function for freeing DMA-safe memory. May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(t, segs, nsegs)
        bus_dma_tag_t t;
        bus_dma_segment_t *segs;
        int nsegs;
{
        struct vm_page *m;
        bus_addr_t addr;
        struct pglist mlist;
        int curseg;

        /*
         * Build a list of pages to free back to the VM system.
         */
        TAILQ_INIT(&mlist);
        for (curseg = 0; curseg < nsegs; curseg++) {
                for (addr = segs[curseg].ds_addr;
                    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
                    addr += PAGE_SIZE) {
                        m = PHYS_TO_VM_PAGE(addr);
                        TAILQ_INSERT_TAIL(&mlist, m, pageq);
                }
        }

        uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory. May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
        bus_dma_tag_t t;
        bus_dma_segment_t *segs;
        int nsegs;
        size_t size;
        caddr_t *kvap;
        int flags;
{
        vaddr_t va;
        bus_addr_t addr;
        int curseg;

        /*
         * If we're only mapping 1 segment, use K0SEG, to avoid
         * TLB thrashing.
         */
        if (nsegs == 1) {
                *kvap = (caddr_t)ALPHA_PHYS_TO_K0SEG(segs[0].ds_addr);
                return (0);
        }

        size = round_page(size);

        va = uvm_km_valloc(kernel_map, size);

        if (va == 0)
                return (ENOMEM);

        *kvap = (caddr_t)va;

        for (curseg = 0; curseg < nsegs; curseg++) {
                for (addr = segs[curseg].ds_addr;
                    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
                    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
                        if (size == 0)
                                panic("_bus_dmamem_map: size botch");
                        pmap_enter(pmap_kernel(), va, addr,
                            VM_PROT_READ | VM_PROT_WRITE,
                            VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
                }
        }
        pmap_update(pmap_kernel());

        return (0);
}

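/*
 * Illustrative sketch (not part of the original file): a driver that needs
 * a DMA-safe descriptor ring typically combines the functions above,
 * allocating physical pages, mapping them into kernel virtual space, and
 * loading the result into a DMA map so the device can be given the bus
 * address. "sc", "ring_size", "seg", "rseg" and "kva" are hypothetical.
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, ring_size, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT) != 0)
 *		goto fail;
 *	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, ring_size, &kva,
 *	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0)
 *		goto fail;
 *	if (bus_dmamap_create(sc->sc_dmat, ring_size, 1, ring_size, 0,
 *	    BUS_DMA_NOWAIT, &sc->sc_ringmap) != 0 ||
 *	    bus_dmamap_load(sc->sc_dmat, sc->sc_ringmap, kva, ring_size,
 *	    NULL, BUS_DMA_NOWAIT) != 0)
 *		goto fail;
 */
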
/*
 * Common function for unmapping DMA-safe memory. May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(t, kva, size)
        bus_dma_tag_t t;
        caddr_t kva;
        size_t size;
{

#ifdef DIAGNOSTIC
        if ((u_long)kva & PGOFSET)
                panic("_bus_dmamem_unmap");
#endif

        /*
         * Nothing to do if we mapped it with K0SEG.
         */
        if (kva >= (caddr_t)ALPHA_K0SEG_BASE &&
            kva <= (caddr_t)ALPHA_K0SEG_END)
                return;

        size = round_page(size);
        uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory. May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
        bus_dma_tag_t t;
        bus_dma_segment_t *segs;
        int nsegs;
        off_t off;
        int prot, flags;
{
        int i;

        for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
                if (off & PGOFSET)
                        panic("_bus_dmamem_mmap: offset unaligned");
                if (segs[i].ds_addr & PGOFSET)
                        panic("_bus_dmamem_mmap: segment unaligned");
                if (segs[i].ds_len & PGOFSET)
                        panic("_bus_dmamem_mmap: segment size not multiple"
                            " of page size");
#endif
                if (off >= segs[i].ds_len) {
                        off -= segs[i].ds_len;
                        continue;
                }

                return (atop(segs[i].ds_addr + off));
        }

        /* Page not found. */
        return (-1);
}