Annotation of sys/arch/alpha/isa/isadma_bounce.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: isadma_bounce.c,v 1.6 2006/05/12 20:48:19 brad Exp $ */
2: /* $NetBSD: isadma_bounce.c,v 1.3 2000/06/29 09:02:57 mrg Exp $ */
3:
4: /*-
5: * Copyright (c) 1996, 1997, 1998, 2000 The NetBSD Foundation, Inc.
6: * All rights reserved.
7: *
8: * This code is derived from software contributed to The NetBSD Foundation
9: * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10: * NASA Ames Research Center.
11: *
12: * Redistribution and use in source and binary forms, with or without
13: * modification, are permitted provided that the following conditions
14: * are met:
15: * 1. Redistributions of source code must retain the above copyright
16: * notice, this list of conditions and the following disclaimer.
17: * 2. Redistributions in binary form must reproduce the above copyright
18: * notice, this list of conditions and the following disclaimer in the
19: * documentation and/or other materials provided with the distribution.
20: * 3. All advertising materials mentioning features or use of this software
21: * must display the following acknowledgement:
22: * This product includes software developed by the NetBSD
23: * Foundation, Inc. and its contributors.
24: * 4. Neither the name of The NetBSD Foundation nor the names of its
25: * contributors may be used to endorse or promote products derived
26: * from this software without specific prior written permission.
27: *
28: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
29: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
30: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
31: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
32: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38: * POSSIBILITY OF SUCH DAMAGE.
39: */
40:
41: #define _ALPHA_BUS_DMA_PRIVATE
42: #include <sys/param.h>
43: #include <sys/systm.h>
44: #include <sys/syslog.h>
45: #include <sys/device.h>
46: #include <sys/malloc.h>
47: #include <sys/proc.h>
48: #include <sys/mbuf.h>
49:
50: #include <machine/bus.h>
51:
52: #include <dev/isa/isareg.h>
53: #include <dev/isa/isavar.h>
54:
55: #include <uvm/uvm_extern.h>
56:
57: extern paddr_t avail_end;
58:
59: /*
60: * ISA can only DMA to 0-16M.
61: */
62: #define ISA_DMA_BOUNCE_THRESHOLD (16 * 1024 * 1024)
63:
/*
 * Cookie used by bouncing ISA DMA.  A pointer to one of these is stashed
 * in the DMA map (map->_dm_cookie).
 */
struct isadma_bounce_cookie {
	int	id_flags;		/* flags; see below */

	/*
	 * Information about the original buffer used during
	 * DMA map syncs.  Note that origbuflen is only used
	 * for ID_BUFTYPE_LINEAR.
	 */
	void	*id_origbuf;		/* pointer to orig buffer if
					   bouncing */
	bus_size_t id_origbuflen;	/* ...and size */
	int	id_buftype;		/* type of buffer */

	void	*id_bouncebuf;		/* pointer to the bounce buffer */
	bus_size_t id_bouncebuflen;	/* ...and size */
	int	id_nbouncesegs;		/* number of valid bounce segs */
	bus_dma_segment_t id_bouncesegs[1]; /* array of bounce buffer
					       physical memory segments;
					       the cookie is malloc'ed with
					       extra space so this can hold
					       up to _dm_segcnt entries */
};
87:
/* id_flags */
#define	ID_MIGHT_NEED_BOUNCE	0x01	/* map could need bounce buffers */
#define	ID_HAS_BOUNCE		0x02	/* map currently has bounce buffers */
#define	ID_IS_BOUNCING		0x04	/* map is bouncing current xfer */

/* id_buftype */
#define	ID_BUFTYPE_INVALID	0	/* no buffer currently loaded */
#define	ID_BUFTYPE_LINEAR	1	/* plain linear buffer */
#define	ID_BUFTYPE_MBUF		2	/* mbuf chain */
#define	ID_BUFTYPE_UIO		3	/* struct uio (load not implemented) */
#define	ID_BUFTYPE_RAW		4	/* raw segments (load not implemented) */

/* Local helpers for managing a map's bounce buffer. */
int	isadma_bounce_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	isadma_bounce_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);
103:
/*
 * Create an ISA DMA map.  Creates the generic map via
 * _bus_dmamap_create(), then attaches a bounce cookie that records
 * whether transfers through this map might need to be bounced below
 * the 16M ISA DMA limit.  Returns 0 or an errno value; on failure the
 * generic map is destroyed again.
 */
int
isadma_bounce_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct isadma_bounce_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(*cookie);

	/*
	 * ISA only has 24-bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / PAGE_SIZE) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
	cookieflags = 0;
	if (avail_end > (t->_wbase + t->_wsize) ||
	    ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		/*
		 * Leave room after the cookie for a full bounce segment
		 * array (the struct already contains one entry).
		 */
		cookiesize += (sizeof(bus_dma_segment_t) *
		    (map->_dm_segcnt - 1));
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DEVBUF,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	memset(cookiestore, 0, cookiesize);
	cookie = (struct isadma_bounce_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = isadma_bounce_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DEVBUF);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}
187:
188: /*
189: * Destroy an ISA DMA map.
190: */
191: void
192: isadma_bounce_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
193: {
194: struct isadma_bounce_cookie *cookie = map->_dm_cookie;
195:
196: /*
197: * Free any bounce pages this map might hold.
198: */
199: if (cookie->id_flags & ID_HAS_BOUNCE)
200: isadma_bounce_free_bouncebuf(t, map);
201:
202: free(cookie, M_DEVBUF);
203: _bus_dmamap_destroy(t, map);
204: }
205:
/*
 * Load an ISA DMA map with a linear buffer.  A direct load is tried
 * first; if it fails and the map might need bouncing, the map is
 * loaded with the bounce buffer instead, and the caller's buffer is
 * remembered so isadma_bounce_dmamap_sync() can copy between the two.
 */
int
isadma_bounce_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    size_t buflen, struct proc *p, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_direct(t, map, buf, buflen, p, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = ID_BUFTYPE_LINEAR;
	error = _bus_dmamap_load_direct(t, map, cookie->id_bouncebuf, buflen,
	    p, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	map->_dm_window = t;
	return (0);
}
268:
/*
 * Like isadma_bounce_dmamap_load(), but for mbufs.  The mbuf chain
 * must have a packet header (its total length comes from m_pkthdr.len).
 * When bouncing, the linear bounce buffer is loaded in place of the
 * chain, and the chain pointer is cached for the sync-time copies.
 */
int
isadma_bounce_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error;

	/*
	 * Make sure on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	/* Only a packet-header mbuf carries a usable total length. */
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("isadma_bounce_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Try to load the map the normal way.  If this errors out,
	 * and we can bounce, we will.
	 */
	error = _bus_dmamap_load_mbuf_direct(t, map, m0, flags);
	if (error == 0 ||
	    (error != 0 && (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) == 0))
		return (error);

	/*
	 * First attempt failed; bounce it.
	 */

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
		error = isadma_bounce_alloc_bouncebuf(t, map, m0->m_pkthdr.len,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.  The bounce buffer is linear, so it
	 * is loaded as a plain buffer regardless of the original type.
	 */
	cookie->id_origbuf = m0;
	cookie->id_origbuflen = m0->m_pkthdr.len;	/* not really used */
	cookie->id_buftype = ID_BUFTYPE_MBUF;
	error = _bus_dmamap_load_direct(t, map, cookie->id_bouncebuf,
	    m0->m_pkthdr.len, NULL, flags);
	if (error) {
		/*
		 * Free the bounce pages, unless our resources
		 * are reserved for our exclusive use.
		 */
		if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
			isadma_bounce_free_bouncebuf(t, map);
		return (error);
	}

	/* ...so isadma_bounce_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= ID_IS_BOUNCING;
	map->_dm_window = t;
	return (0);
}
340:
341: /*
342: * Like isadma_bounce_dmamap_load(), but for uios.
343: */
344: int
345: isadma_bounce_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
346: struct uio *uio, int flags)
347: {
348:
349: panic("isadma_bounce_dmamap_load_uio: not implemented");
350: }
351:
352: /*
353: * Like isadma_bounce_dmamap_load(), but for raw memory allocated with
354: * bus_dmamem_alloc().
355: */
356: int
357: isadma_bounce_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
358: bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
359: {
360:
361: panic("isadma_bounce_dmamap_load_raw: not implemented");
362: }
363:
364: /*
365: * Unload an ISA DMA map.
366: */
367: void
368: isadma_bounce_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
369: {
370: struct isadma_bounce_cookie *cookie = map->_dm_cookie;
371:
372: /*
373: * If we have bounce pages, free them, unless they're
374: * reserved for our exclusive use.
375: */
376: if ((cookie->id_flags & ID_HAS_BOUNCE) &&
377: (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
378: isadma_bounce_free_bouncebuf(t, map);
379:
380: cookie->id_flags &= ~ID_IS_BOUNCING;
381: cookie->id_buftype = ID_BUFTYPE_INVALID;
382:
383: /*
384: * Do the generic bits of the unload.
385: */
386: _bus_dmamap_unload(t, map);
387: }
388:
/*
 * Synchronize an ISA DMA map.  If the map is currently bouncing,
 * copy data between the caller's original buffer and the bounce
 * buffer as dictated by `ops'; in all cases finish by draining the
 * CPU write buffer with alpha_mb().
 */
void
isadma_bounce_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("isadma_bounce_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	/* Bounds-check the region for the operations that copy data. */
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("isadma_bounce_dmamap_sync: bad length");
	}
#endif

	/*
	 * If we're not bouncing, just drain the write buffer
	 * and return.
	 */
	if ((cookie->id_flags & ID_IS_BOUNCING) == 0) {
		alpha_mb();
		return;
	}

	switch (cookie->id_buftype) {
	case ID_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			memcpy((char *)cookie->id_bouncebuf + offset,
			    (char *)cookie->id_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 */
			memcpy((char *)cookie->id_origbuf + offset,
			    (char *)cookie->id_bouncebuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case ID_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = cookie->id_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the bounce buffer.
			 */
			m_copydata(m0, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the bounce buffer to the caller's buffer.
			 * Walk the chain, skipping mbufs wholly before
			 * `offset', then copy piecewise until `len' bytes
			 * have been transferred.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			    m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				memcpy(mtod(m, caddr_t) + moff,
				    (char *)cookie->id_bouncebuf + offset,
				    minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	case ID_BUFTYPE_UIO:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_UIO");
		break;

	case ID_BUFTYPE_RAW:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_RAW");
		break;

	case ID_BUFTYPE_INVALID:
		panic("isadma_bounce_dmamap_sync: ID_BUFTYPE_INVALID");
		break;

	default:
		panic("isadma_bounce_dmamap_sync: unknown buffer type %d",
		    cookie->id_buftype);
	}

	/* Drain the write buffer. */
	alpha_mb();
}
520:
521: /*
522: * Allocate memory safe for ISA DMA.
523: */
524: int
525: isadma_bounce_dmamem_alloc(bus_dma_tag_t t, bus_size_t size,
526: bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
527: int nsegs, int *rsegs, int flags)
528: {
529: paddr_t high;
530:
531: if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
532: high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD);
533: else
534: high = trunc_page(avail_end);
535:
536: return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
537: segs, nsegs, rsegs, flags, 0, high));
538: }
539:
540: /**********************************************************************
541: * ISA DMA utility functions
542: **********************************************************************/
543:
/*
 * Allocate a bounce buffer for `map': DMA-safe pages plus a kernel
 * virtual mapping, both recorded in the map's cookie.  Returns 0 or
 * an errno value; on failure the cookie is left with no bounce state.
 */
int
isadma_bounce_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    bus_size_t size, int flags)
{
	struct isadma_bounce_cookie *cookie = map->_dm_cookie;
	int error = 0;

	/* Round the request up to whole pages. */
	cookie->id_bouncebuflen = round_page(size);
	error = isadma_bounce_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	/* Map the DMA-safe pages into kernel virtual address space. */
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		/*
		 * NOTE(review): when the allocation itself failed we get
		 * here with id_nbouncesegs presumably 0 (cookie was zeroed
		 * at create time); assumes _bus_dmamem_free() tolerates a
		 * zero-segment free -- confirm.
		 */
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else
		cookie->id_flags |= ID_HAS_BOUNCE;

	return (error);
}
572:
573: void
574: isadma_bounce_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
575: {
576: struct isadma_bounce_cookie *cookie = map->_dm_cookie;
577:
578: _bus_dmamem_unmap(t, cookie->id_bouncebuf,
579: cookie->id_bouncebuflen);
580: _bus_dmamem_free(t, cookie->id_bouncesegs,
581: cookie->id_nbouncesegs);
582: cookie->id_bouncebuflen = 0;
583: cookie->id_nbouncesegs = 0;
584: cookie->id_flags &= ~ID_HAS_BOUNCE;
585: }
CVSweb