Annotation of sys/dev/pci/musycc.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: musycc.c,v 1.15 2006/03/25 22:41:46 djm Exp $ */
2:
3: /*
4: * Copyright (c) 2004,2005 Internet Business Solutions AG, Zurich, Switzerland
5: * Written by: Claudio Jeker <jeker@accoom.net>
6: *
7: * Permission to use, copy, modify, and distribute this software for any
8: * purpose with or without fee is hereby granted, provided that the above
9: * copyright notice and this permission notice appear in all copies.
10: *
11: * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12: * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13: * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14: * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15: * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16: * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17: * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18: */
19: #include "bpfilter.h"
20:
21: #include <sys/param.h>
22: #include <sys/types.h>
23:
24: #include <sys/device.h>
25: #include <sys/kernel.h>
26: #include <sys/limits.h>
27: #include <sys/malloc.h>
28: #include <sys/mbuf.h>
29: #include <sys/proc.h>
30: #include <sys/socket.h>
31: #include <sys/syslog.h>
32: #include <sys/systm.h>
33:
34: #include <machine/cpu.h>
35: #include <machine/bus.h>
36: #include <machine/intr.h>
37:
38: #include <net/if.h>
39: #include <net/if_media.h>
40: #include <net/if_sppp.h>
41:
42: #if NBPFILTER > 0
43: #include <net/bpf.h>
44: #endif
45:
46: #include <dev/pci/musyccvar.h>
47: #include <dev/pci/musyccreg.h>
48:
49: int musycc_alloc_groupdesc(struct musycc_softc *);
50: int musycc_alloc_intqueue(struct musycc_softc *);
51: int musycc_alloc_group(struct musycc_group *);
52: void musycc_free_groupdesc(struct musycc_softc *);
53: void musycc_free_intqueue(struct musycc_softc *);
54: void musycc_free_dmadesc(struct musycc_group *);
55: void musycc_free_group(struct musycc_group *);
56: void musycc_set_group(struct musycc_group *, int, int, int);
57: int musycc_set_tsmap(struct musycc_group *, struct channel_softc *, char);
58: int musycc_set_chandesc(struct musycc_group *, int, int, int);
59: void musycc_activate_channel(struct musycc_group *, int);
60: void musycc_state_engine(struct musycc_group *, int, enum musycc_event);
61:
62: struct dma_desc *musycc_dma_get(struct musycc_group *);
63: void musycc_dma_free(struct musycc_group *, struct dma_desc *);
64: int musycc_list_tx_init(struct musycc_group *, int, int);
65: int musycc_list_rx_init(struct musycc_group *, int, int);
66: void musycc_list_tx_free(struct musycc_group *, int);
67: void musycc_list_rx_free(struct musycc_group *, int);
68: void musycc_reinit_dma(struct musycc_group *, int);
69: int musycc_newbuf(struct musycc_group *, struct dma_desc *, struct mbuf *);
70: int musycc_encap(struct musycc_group *, struct mbuf *, int);
71:
72: void musycc_rxeom(struct musycc_group *, int, int);
73: void musycc_txeom(struct musycc_group *, int, int);
74: void musycc_kick(struct musycc_group *);
75: void musycc_sreq(struct musycc_group *, int, u_int32_t, int,
76: enum musycc_event);
77:
78: #ifndef ACCOOM_DEBUG
79: #define musycc_dump_group(n, x)
80: #define musycc_dump_desc(n, x)
81: #define musycc_dump_dma(n, x, y)
82: #else
83: int accoom_debug = 0;
84:
85: char *musycc_intr_print(u_int32_t);
86: void musycc_dump_group(int, struct musycc_group *);
87: void musycc_dump_desc(int, struct musycc_group *);
88: void musycc_dump_dma(int, struct musycc_group *, int);
89: #endif
90:
/*
 * Common attach code for MUSYCC based controllers.  Allocates the
 * shared group descriptor memory and the interrupt queue, initializes
 * the per-group software state and programs the chip's global
 * registers.  Returns 0 on success, -1 on failure (everything
 * allocated so far is released before returning).
 */
int
musycc_attach_common(struct musycc_softc *sc, u_int32_t portmap, u_int32_t mode)
{
	struct musycc_group *mg;
	int i, j;

	if (musycc_alloc_groupdesc(sc) == -1) {
		printf(": couldn't alloc group descriptors\n");
		return (-1);
	}

	if (musycc_alloc_intqueue(sc) == -1) {
		printf(": couldn't alloc interrupt queue\n");
		musycc_free_groupdesc(sc);
		return (-1);
	}

	/*
	 * global configuration: set EBUS to sane defaults:
	 * intel mode, elapse = 3, blapse = 3, alapse = 3
	 * XXX XXX disable INTB for now
	 */
	sc->mc_global_conf = (portmap & MUSYCC_CONF_PORTMAP) |
	    MUSYCC_CONF_MPUSEL | MUSYCC_CONF_ECKEN |
	    MUSYCC_CONF_ELAPSE_SET(3) | MUSYCC_CONF_ALAPSE_SET(3) |
	    MUSYCC_CONF_BLAPSE_SET(3) | MUSYCC_CONF_INTB;

	/* initialize group descriptors */
	sc->mc_groups = (struct musycc_group *)malloc(sc->mc_ngroups *
	    sizeof(struct musycc_group), M_DEVBUF, M_NOWAIT);
	if (sc->mc_groups == NULL) {
		printf(": couldn't alloc group descriptors\n");
		musycc_free_groupdesc(sc);
		musycc_free_intqueue(sc);
		return (-1);
	}
	bzero(sc->mc_groups, sc->mc_ngroups * sizeof(struct musycc_group));

	for (i = 0; i < sc->mc_ngroups; i++) {
		/* per-group software state */
		mg = &sc->mc_groups[i];
		mg->mg_hdlc = sc;
		mg->mg_gnum = i;
		/* group-to-port mapping depends on the portmap mode bits */
		mg->mg_port = i >> (portmap & MUSYCC_CONF_PORTMAP);
		mg->mg_dmat = sc->mc_dmat;

		if (musycc_alloc_group(mg) == -1) {
			printf(": couldn't alloc group structures\n");
			/* unwind the groups that were set up so far */
			for (j = 0; j < i; j++)
				musycc_free_group(&sc->mc_groups[j]);
			musycc_free_groupdesc(sc);
			musycc_free_intqueue(sc);
			return (-1);
		}

		/* hook up the shared (DMA visible) group descriptor */
		mg->mg_group = (struct musycc_grpdesc *)
		    (sc->mc_groupkva + MUSYCC_GROUPBASE(i));
		bzero(mg->mg_group, sizeof(struct musycc_grpdesc));
		musycc_set_group(mg, MUSYCC_GRCFG_POLL32, MUSYCC_MAXFRM_MAX,
		    MUSYCC_MAXFRM_MAX);
		musycc_set_port(mg, mode);

		/* flush the descriptor, then give its bus address to the chip */
		bus_dmamap_sync(sc->mc_dmat, sc->mc_cfgmap,
		    MUSYCC_GROUPBASE(i), sizeof(struct musycc_grpdesc),
		    BUS_DMASYNC_PREWRITE);
		bus_space_write_4(sc->mc_st, sc->mc_sh, MUSYCC_GROUPBASE(i),
		    sc->mc_cfgmap->dm_segs[0].ds_addr + MUSYCC_GROUPBASE(i));
	}

	/* Dual Address Cycle Base Pointer */
	bus_space_write_4(sc->mc_st, sc->mc_sh, MUSYCC_DACB_PTR, 0);
	/* Global Configuration Descriptor */
	bus_space_write_4(sc->mc_st, sc->mc_sh, MUSYCC_GLOBALCONF,
	    sc->mc_global_conf);
	/* Interrupt Queue Descriptor */
	bus_space_write_4(sc->mc_st, sc->mc_sh, MUSYCC_INTQPTR,
	    sc->mc_intrqptr);
	/*
	 * Interrupt Queue Length.
	 * NOTE: a value of 1 indicates a queue length of 2 descriptors!
	 */
	bus_space_write_4(sc->mc_st, sc->mc_sh, MUSYCC_INTQLEN,
	    MUSYCC_INTLEN - 1);

	/* Configure groups, needs to be done only once per group */
	for (i = 0; i < sc->mc_ngroups; i++) {
		mg = &sc->mc_groups[i];
		musycc_sreq(mg, 0, MUSYCC_SREQ_SET(5), MUSYCC_SREQ_BOTH,
		    EV_NULL);
		mg->mg_loaded = 1;
	}

	return (0);
}
184:
/*
 * Allocate and map the shared group descriptor memory and load it into
 * a DMA map so the chip can access it.  Returns 0 on success, -1 on
 * failure with all intermediate allocations released.
 */
int
musycc_alloc_groupdesc(struct musycc_softc *sc)
{
	/*
	 * Allocate per group/port shared memory.
	 * One big chunk of nports * 2048 bytes is allocated. This is
	 * done to ensure that all group structures are 2048 bytes aligned.
	 */
	if (bus_dmamem_alloc(sc->mc_dmat, sc->mc_ngroups * 2048,
	    2048, 0, sc->mc_cfgseg, 1, &sc->mc_cfgnseg, BUS_DMA_NOWAIT)) {
		return (-1);
	}
	if (bus_dmamem_map(sc->mc_dmat, sc->mc_cfgseg, sc->mc_cfgnseg,
	    sc->mc_ngroups * 2048, &sc->mc_groupkva, BUS_DMA_NOWAIT)) {
		bus_dmamem_free(sc->mc_dmat, sc->mc_cfgseg, sc->mc_cfgnseg);
		return (-1);
	}
	/* create and load bus dma segment, one for all ports */
	if (bus_dmamap_create(sc->mc_dmat, sc->mc_ngroups * 2048,
	    1, sc->mc_ngroups * 2048, 0, BUS_DMA_NOWAIT, &sc->mc_cfgmap)) {
		bus_dmamem_unmap(sc->mc_dmat, sc->mc_groupkva,
		    sc->mc_ngroups * 2048);
		bus_dmamem_free(sc->mc_dmat, sc->mc_cfgseg, sc->mc_cfgnseg);
		return (-1);
	}
	if (bus_dmamap_load(sc->mc_dmat, sc->mc_cfgmap, sc->mc_groupkva,
	    sc->mc_ngroups * 2048, NULL, BUS_DMA_NOWAIT)) {
		/* everything is set up by now, common cleanup applies */
		musycc_free_groupdesc(sc);
		return (-1);
	}

	return (0);
}
218:
/*
 * Allocate, map and load the shared interrupt queue memory and record
 * the bus address of the queue for later programming into the chip.
 * Returns 0 on success, -1 on failure with everything rolled back.
 */
int
musycc_alloc_intqueue(struct musycc_softc *sc)
{
	/*
	 * allocate interrupt queue, use one page for the queue
	 */
	if (bus_dmamem_alloc(sc->mc_dmat, sizeof(struct musycc_intdesc), 4, 0,
	    sc->mc_intrseg, 1, &sc->mc_intrnseg, BUS_DMA_NOWAIT)) {
		return (-1);
	}
	if (bus_dmamem_map(sc->mc_dmat, sc->mc_intrseg, sc->mc_intrnseg,
	    sizeof(struct musycc_intdesc), (caddr_t *)&sc->mc_intrd,
	    BUS_DMA_NOWAIT)) {
		bus_dmamem_free(sc->mc_dmat, sc->mc_intrseg, sc->mc_intrnseg);
		return (-1);
	}

	/* create and load bus dma segment */
	if (bus_dmamap_create(sc->mc_dmat, sizeof(struct musycc_intdesc),
	    1, sizeof(struct musycc_intdesc), 0, BUS_DMA_NOWAIT,
	    &sc->mc_intrmap)) {
		bus_dmamem_unmap(sc->mc_dmat, (caddr_t)sc->mc_intrd,
		    sizeof(struct musycc_intdesc));
		bus_dmamem_free(sc->mc_dmat, sc->mc_intrseg, sc->mc_intrnseg);
		return (-1);
	}
	if (bus_dmamap_load(sc->mc_dmat, sc->mc_intrmap, sc->mc_intrd,
	    sizeof(struct musycc_intdesc), NULL, BUS_DMA_NOWAIT)) {
		musycc_free_intqueue(sc);
		return (-1);
	}

	/* initialize the interrupt queue pointer (bus address of md_intrq) */
	sc->mc_intrqptr = sc->mc_intrmap->dm_segs[0].ds_addr +
	    offsetof(struct musycc_intdesc, md_intrq[0]);

	return (0);
}
257:
/*
 * Allocate the per-group DMA descriptor pool plus the tx/rx spare maps
 * and put every descriptor of the pool onto the group's free list.
 * Returns 0 on success, -1 on failure with everything rolled back.
 */
int
musycc_alloc_group(struct musycc_group *mg)
{
	struct dma_desc *dd;
	int j;

	/* Allocate per group dma memory */
	if (bus_dmamem_alloc(mg->mg_dmat, MUSYCC_DMA_MAPSIZE,
	    PAGE_SIZE, 0, mg->mg_listseg, 1, &mg->mg_listnseg,
	    BUS_DMA_NOWAIT))
		return (-1);
	if (bus_dmamem_map(mg->mg_dmat, mg->mg_listseg, mg->mg_listnseg,
	    MUSYCC_DMA_MAPSIZE, &mg->mg_listkva, BUS_DMA_NOWAIT)) {
		bus_dmamem_free(mg->mg_dmat, mg->mg_listseg, mg->mg_listnseg);
		return (-1);
	}

	/* create and load bus dma segment */
	if (bus_dmamap_create(mg->mg_dmat, MUSYCC_DMA_MAPSIZE, 1,
	    MUSYCC_DMA_MAPSIZE, 0, BUS_DMA_NOWAIT, &mg->mg_listmap)) {
		bus_dmamem_unmap(mg->mg_dmat, mg->mg_listkva,
		    MUSYCC_DMA_MAPSIZE);
		bus_dmamem_free(mg->mg_dmat, mg->mg_listseg, mg->mg_listnseg);
		return (-1);
	}
	if (bus_dmamap_load(mg->mg_dmat, mg->mg_listmap, mg->mg_listkva,
	    MUSYCC_DMA_MAPSIZE, NULL, BUS_DMA_NOWAIT)) {
		musycc_free_dmadesc(mg);
		return (-1);
	}

	/*
	 * Create spare maps for musycc_start and musycc_newbuf.
	 * Limit the dma queue to MUSYCC_DMA_SIZE entries even though there
	 * is no actual hard limit from the chip.
	 */
	if (bus_dmamap_create(mg->mg_dmat, MCLBYTES, MUSYCC_DMA_SIZE, MCLBYTES,
	    0, BUS_DMA_NOWAIT, &mg->mg_tx_sparemap) != 0) {
		musycc_free_dmadesc(mg);
		return (-1);
	}
	if (bus_dmamap_create(mg->mg_dmat, MCLBYTES, MUSYCC_DMA_SIZE, MCLBYTES,
	    0, BUS_DMA_NOWAIT, &mg->mg_rx_sparemap) != 0) {
		bus_dmamap_destroy(mg->mg_dmat, mg->mg_tx_sparemap);
		musycc_free_dmadesc(mg);
		return (-1);
	}

	/* the descriptor pool lives at the start of the list memory */
	mg->mg_dma_pool = (struct dma_desc *)mg->mg_listkva;
	bzero(mg->mg_dma_pool,
	    MUSYCC_DMA_CNT * sizeof(struct dma_desc));

	/* add all descriptors to the freelist */
	for (j = 0; j < MUSYCC_DMA_CNT; j++) {
		dd = &mg->mg_dma_pool[j];
		/* initialize, same as for spare maps */
		if (bus_dmamap_create(mg->mg_dmat, MCLBYTES, MUSYCC_DMA_SIZE,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &dd->map)) {
			musycc_free_group(mg);
			return (-1);
		}
		/* link */
		dd->nextdesc = mg->mg_freelist;
		mg->mg_freelist = dd;
		mg->mg_freecnt++;
	}

	return (0);
}
327:
/*
 * Release the shared group descriptor DMA resources set up by
 * musycc_alloc_groupdesc(): map, kva mapping, then backing segment
 * (teardown must happen in exactly this order).
 */
void
musycc_free_groupdesc(struct musycc_softc *sc)
{
	bus_dmamap_destroy(sc->mc_dmat, sc->mc_cfgmap);
	bus_dmamem_unmap(sc->mc_dmat, sc->mc_groupkva,
	    sc->mc_ngroups * 2048);
	bus_dmamem_free(sc->mc_dmat, sc->mc_cfgseg, sc->mc_cfgnseg);
}
336:
/*
 * Release the interrupt queue DMA resources set up by
 * musycc_alloc_intqueue(): map, kva mapping, then backing segment.
 */
void
musycc_free_intqueue(struct musycc_softc *sc)
{
	bus_dmamap_destroy(sc->mc_dmat, sc->mc_intrmap);
	bus_dmamem_unmap(sc->mc_dmat, (caddr_t)sc->mc_intrd,
	    sizeof(struct musycc_intdesc));
	bus_dmamem_free(sc->mc_dmat, sc->mc_intrseg, sc->mc_intrnseg);
}
345:
/*
 * Release the per-group descriptor pool DMA resources set up by
 * musycc_alloc_group(): map, kva mapping, then backing segment.
 */
void
musycc_free_dmadesc(struct musycc_group *mg)
{
	bus_dmamap_destroy(mg->mg_dmat, mg->mg_listmap);
	bus_dmamem_unmap(mg->mg_dmat, mg->mg_listkva,
	    MUSYCC_DMA_MAPSIZE);
	bus_dmamem_free(mg->mg_dmat, mg->mg_listseg, mg->mg_listnseg);
}
354:
355: void
356: musycc_free_group(struct musycc_group *mg)
357: {
358: bus_dmamap_destroy(mg->mg_dmat, mg->mg_tx_sparemap);
359: bus_dmamap_destroy(mg->mg_dmat, mg->mg_tx_sparemap);
360: /* XXX dma descriptors ? */
361: musycc_free_dmadesc(mg);
362: mg->mg_dma_pool = NULL;
363: mg->mg_freelist = NULL;
364: mg->mg_freecnt = 0;
365: }
366:
367: void
368: musycc_set_group(struct musycc_group *mg, int poll, int maxa, int maxb)
369: {
370: /* set global conf and interrupt descriptor */
371: mg->mg_group->global_conf = htole32(mg->mg_hdlc->mc_global_conf);
372: /*
373: * Interrupt Queue and Length.
374: * NOTE: a value of 1 indicates the queue length of 2 descriptors!
375: */
376: mg->mg_group->int_queuep = htole32(mg->mg_hdlc->mc_intrqptr);
377: mg->mg_group->int_queuelen = htole32(MUSYCC_INTLEN - 1);
378:
379: /* group config */
380: mg->mg_group->group_conf = htole32(MUSYCC_GRCFG_RXENBL |
381: MUSYCC_GRCFG_TXENBL | MUSYCC_GRCFG_SUBDSBL |
382: MUSYCC_GRCFG_MSKCOFA | MUSYCC_GRCFG_MSKOOF |
383: MUSYCC_GRCFG_MCENBL | (poll & MUSYCC_GRCFG_POLL64));
384:
385: /* memory protection, not supported by device */
386:
387: /* message length config, preinit with useful data */
388: /* this is currently not used and the max is limited to 4094 bytes */
389: mg->mg_group->msglen_conf = htole32(maxa);
390: mg->mg_group->msglen_conf |= htole32(maxb << MUSYCC_MAXFRM2_SHIFT);
391: }
392:
/*
 * Program the port configuration word of the group descriptor and, if
 * the group was already loaded into the chip, issue a service request
 * so the chip re-reads it.
 */
void
musycc_set_port(struct musycc_group *mg, int mode)
{
	/*
	 * All signals trigger on falling edge only exception is TSYNC
	 * which triggers on rising edge. For the framer TSYNC is set to
	 * falling edge too but Musycc needs rising edge or everything gets
	 * off by one. Don't three-state TX (not needed).
	 */
	mg->mg_group->port_conf = htole32(MUSYCC_PORT_TSYNC_EDGE |
	    MUSYCC_PORT_TRITX | (mode & MUSYCC_PORT_MODEMASK));

	if (mg->mg_loaded)
		musycc_sreq(mg, 0, MUSYCC_SREQ_SET(21), MUSYCC_SREQ_RX,
		    EV_NULL);
}
409:
/*
 * Channel specific calls
 */
413: int
414: musycc_set_tsmap(struct musycc_group *mg, struct channel_softc *cc, char slot)
415: {
416: int i, nslots = 0, off, scale;
417: u_int32_t tslots = cc->cc_tslots;
418:
419: ACCOOM_PRINTF(1, ("%s: musycc_set_tsmap %08x slot %c\n",
420: cc->cc_ifp->if_xname, tslots, slot));
421:
422: switch (slot) {
423: case 'A': /* single port, non interleaved */
424: off = 0;
425: scale = 1;
426: break;
427: case 'a': /* dual port, interleaved */
428: case 'b':
429: off = slot - 'a';
430: scale = 2;
431: break;
432: case '1': /* possible quad port, interleaved */
433: case '2':
434: case '3':
435: case '4':
436: off = slot - '1';
437: scale = 4;
438: break;
439: default:
440: /* impossible */
441: log(LOG_ERR, "%s: accessing unsupported slot %c",
442: cc->cc_ifp->if_xname, slot);
443: return (-1);
444: }
445:
446: /*
447: * setup timeslot map but first make sure no timeslot is already used
448: * note: 56kbps mode for T1-SF needs to be set in here
449: * note2: if running with port mapping the other group needs to be
450: * checked too or we may get funny results. Currenly not possible
451: * because of the slot offsets (odd, even slots).
452: */
453: for (i = 0; i < sizeof(u_int32_t) * 8; i++)
454: if (tslots & (1 << i))
455: if (mg->mg_group->tx_tsmap[i * scale + off] &
456: MUSYCC_TSLOT_ENABLED ||
457: mg->mg_group->rx_tsmap[i * scale + off] &
458: MUSYCC_TSLOT_ENABLED)
459: return (0);
460:
461: for (i = 0; i < sizeof(u_int32_t) * 8; i++)
462: if (tslots & (1 << i)) {
463: nslots++;
464: mg->mg_group->tx_tsmap[i * scale + off] =
465: MUSYCC_TSLOT_CHAN(cc->cc_channel) |
466: MUSYCC_TSLOT_ENABLED;
467: mg->mg_group->rx_tsmap[i * scale + off] =
468: MUSYCC_TSLOT_CHAN(cc->cc_channel) |
469: MUSYCC_TSLOT_ENABLED;
470: }
471:
472: return (nslots);
473: }
474:
/*
 * Allocate FIFO buffer space for a channel and program its tx and rx
 * channel descriptors.  Each timeslot occupies two FIFO blocks; the
 * allocation is tracked in mg_fifomask (one bit per block) and always
 * starts on an even block index.  Returns 0 on success, EINVAL for a
 * bad slot count, EBUSY if no contiguous FIFO range is free.
 */
int
musycc_set_chandesc(struct musycc_group *mg, int chan, int nslots, int proto)
{
	u_int64_t mask = ULLONG_MAX;
	int idx, n;

	ACCOOM_PRINTF(1, ("%s: musycc_set_chandesc nslots %d proto %d\n",
	    mg->mg_channels[chan]->cc_ifp->if_xname, nslots, proto));

	if (nslots == 0 || nslots > 32)
		return (EINVAL);

	/* mask covers 2 * nslots contiguous FIFO blocks */
	n = 64 - 2 * nslots;
	mask >>= n;

	/* search for the first free, even-aligned block range */
	for (idx = 0; idx <= n; idx += 2)
		if (!(mg->mg_fifomask & mask << idx))
			break;

	if (idx > n)
		return (EBUSY);

	mg->mg_fifomask |= mask << idx;

	/* setup channel descriptor */
	mg->mg_group->tx_cconf[chan] = htole32(MUSYCC_CHAN_BUFIDX_SET(idx) |
	    MUSYCC_CHAN_BUFLEN_SET(nslots * 2 - 1) |
	    MUSYCC_CHAN_PROTO_SET(proto));
	mg->mg_group->rx_cconf[chan] = htole32(MUSYCC_CHAN_BUFIDX_SET(idx) |
	    MUSYCC_CHAN_BUFLEN_SET(nslots * 2 - 1) |
	    MUSYCC_CHAN_MSKIDLE | MUSYCC_CHAN_MSKSUERR | MUSYCC_CHAN_MSKSINC |
	    MUSYCC_CHAN_MSKSDEC | MUSYCC_CHAN_MSKSFILT |
	    MUSYCC_CHAN_PROTO_SET(proto));

	return (0);
}
511:
/*
 * Configure and activate a channel: program the timeslot map and the
 * channel descriptor, build the tx/rx DMA rings and kick the channel
 * towards the running state.  May sleep waiting for the chip to
 * acknowledge service requests.  Returns 0 on success or an errno; on
 * failure the channel is forced back to the idle state.
 */
int
musycc_init_channel(struct channel_softc *cc, char slot)
{
	struct musycc_group *mg;
	struct ifnet *ifp = cc->cc_ifp;
	int nslots, rv, s;

	if (cc->cc_state == CHAN_FLOAT)
		return (ENOTTY);
	mg = cc->cc_group;

	ACCOOM_PRINTF(2, ("%s: musycc_init_channel [state %d] slot %c\n",
	    cc->cc_ifp->if_xname, cc->cc_state, slot));

	/* a non-idle channel is stopped first; EV_STOP resets it to idle */
	if (cc->cc_state != CHAN_IDLE) {
		musycc_sreq(mg, cc->cc_channel, MUSYCC_SREQ_SET(9),
		    MUSYCC_SREQ_BOTH, EV_STOP);
		tsleep(cc, PZERO | PCATCH, "musycc", hz);
		if (cc->cc_state != CHAN_IDLE) {
			ACCOOM_PRINTF(0, ("%s: failed to reset channel\n",
			    cc->cc_ifp->if_xname));
			return (EIO);
		}
	}

	s = splnet();
	/* setup timeslot map */
	nslots = musycc_set_tsmap(mg, cc, slot);
	if (nslots == -1) {
		/* unsupported slot */
		rv = EINVAL;
		goto fail;
	} else if (nslots == 0) {
		/* requested timeslot already in use */
		rv = EBUSY;
		goto fail;
	}

	if ((rv = musycc_set_chandesc(mg, cc->cc_channel, nslots,
	    MUSYCC_PROTO_HDLC16)))
		goto fail;

	/*
	 * setup tx DMA chain
	 * NOTE(review): the return value is ignored here -- a descriptor
	 * shortage would only surface via the rx init below; verify.
	 */
	musycc_list_tx_init(mg, cc->cc_channel, MUSYCC_DMA_SIZE);
	/* setup rx DMA chain */
	if ((rv = musycc_list_rx_init(mg, cc->cc_channel, MUSYCC_DMA_SIZE))) {
		ACCOOM_PRINTF(0, ("%s: initialization failed: "
		    "no memory for rx buffers\n", cc->cc_ifp->if_xname));
		goto fail;
	}

	/* IFF_RUNNING set by sppp_ioctl() */
	ifp->if_flags &= ~IFF_OACTIVE;

	cc->cc_state = CHAN_TRANSIENT;
	splx(s);

	musycc_dump_group(3, mg);
	musycc_activate_channel(mg, cc->cc_channel);
	tsleep(cc, PZERO | PCATCH, "musycc", hz);

	/*
	 * XXX we could actually check if the activation of the channels was
	 * successful but what type of error should we return?
	 */
	return (0);

fail:
	splx(s);
	cc->cc_state = CHAN_IDLE;	/* force idle state */
	musycc_free_channel(mg, cc->cc_channel);
	return (rv);
}
583:
/*
 * Kick a configured channel into operation with a back-to-back service
 * request sequence (sets 26, 24, then 8 -- presumably configuration
 * reloads followed by activation; confirm against the MUSYCC service
 * request documentation).  The final request carries EV_ACTIVATE so
 * the state engine moves the channel to CHAN_RUNNING on completion.
 */
void
musycc_activate_channel(struct musycc_group *mg, int chan)
{
	ACCOOM_PRINTF(2, ("%s: musycc_activate_channel\n",
	    mg->mg_channels[chan]->cc_ifp->if_xname));
	musycc_sreq(mg, chan, MUSYCC_SREQ_SET(26), MUSYCC_SREQ_BOTH,
	    EV_NULL);
	musycc_sreq(mg, chan, MUSYCC_SREQ_SET(24), MUSYCC_SREQ_BOTH,
	    EV_NULL);
	musycc_sreq(mg, chan, MUSYCC_SREQ_SET(8), MUSYCC_SREQ_BOTH,
	    EV_ACTIVATE);
}
596:
597: void
598: musycc_stop_channel(struct channel_softc *cc)
599: {
600: struct musycc_group *mg;
601:
602: if (cc->cc_state == CHAN_FLOAT) {
603: /* impossible */
604: log(LOG_ERR, "%s: unexpected state in musycc_stop_channel",
605: cc->cc_ifp->if_xname);
606: cc->cc_state = CHAN_IDLE; /* reset */
607: musycc_free_channel(mg, cc->cc_channel);
608: return;
609: }
610:
611: mg = cc->cc_group;
612: ACCOOM_PRINTF(2, ("%s: musycc_stop_channel\n", cc->cc_ifp->if_xname));
613: musycc_sreq(mg, cc->cc_channel, MUSYCC_SREQ_SET(9), MUSYCC_SREQ_BOTH,
614: EV_STOP);
615: tsleep(cc, PZERO | PCATCH, "musycc", hz);
616: }
617:
/*
 * Release everything a channel holds inside its group: the timeslot
 * map entries, the FIFO buffer space, the channel descriptors and the
 * tx/rx DMA rings.  Finally notifies the chip via service requests;
 * the last one carries EV_IDLE so the state engine moves the channel
 * to CHAN_IDLE.
 */
void
musycc_free_channel(struct musycc_group *mg, int chan)
{
	u_int64_t mask = ULLONG_MAX;
	int i, idx, s, slots;

	ACCOOM_PRINTF(2, ("%s: musycc_free_channel\n",
	    mg->mg_channels[chan]->cc_ifp->if_xname));

	s = splnet();
	/* Clear the timeout timer. */
	mg->mg_channels[chan]->cc_ifp->if_timer = 0;

	/* clear every timeslot map entry pointing at this channel */
	for (i = 0; i < 128; i++) {
		if (mg->mg_group->tx_tsmap[i] & MUSYCC_TSLOT_ENABLED)
			if ((mg->mg_group->tx_tsmap[i] & MUSYCC_TSLOT_MASK) ==
			    chan)
				mg->mg_group->tx_tsmap[i] = 0;
		if (mg->mg_group->rx_tsmap[i] & MUSYCC_TSLOT_ENABLED)
			if ((mg->mg_group->rx_tsmap[i] & MUSYCC_TSLOT_MASK) ==
			    chan)
				mg->mg_group->rx_tsmap[i] = 0;
	}

	/*
	 * Clear channel descriptor, especially free FIFO space:
	 * reconstruct the block mask from the buffer index/length that
	 * musycc_set_chandesc() programmed and clear it in mg_fifomask.
	 */
	idx = MUSYCC_CHAN_BUFIDX_GET(letoh32(mg->mg_group->tx_cconf[chan]));
	slots = MUSYCC_CHAN_BUFLEN_GET(letoh32(mg->mg_group->tx_cconf[chan]));
	slots = (slots + 1) / 2;
	mask >>= 64 - 2 * slots;
	mask <<= idx;
	mg->mg_fifomask &= ~mask;
	mg->mg_group->tx_cconf[chan] = 0;
	mg->mg_group->rx_cconf[chan] = 0;

	/* free dma rings */
	musycc_list_rx_free(mg, chan);
	musycc_list_tx_free(mg, chan);

	splx(s);

	/* update chip info with sreq */
	musycc_sreq(mg, chan, MUSYCC_SREQ_SET(24), MUSYCC_SREQ_BOTH,
	    EV_NULL);
	musycc_sreq(mg, chan, MUSYCC_SREQ_SET(26), MUSYCC_SREQ_BOTH,
	    EV_IDLE);
}
665:
666: void
667: musycc_state_engine(struct musycc_group *mg, int chan, enum musycc_event ev)
668: {
669: enum musycc_state state;
670:
671: if (mg->mg_channels[chan] == NULL)
672: return;
673:
674: state = mg->mg_channels[chan]->cc_state;
675:
676: ACCOOM_PRINTF(2, ("%s: musycc_state_engine state %d event %d\n",
677: mg->mg_channels[chan]->cc_ifp->if_xname, state, ev));
678:
679: switch (ev) {
680: case EV_NULL:
681: /* no state change */
682: return;
683: case EV_ACTIVATE:
684: state = CHAN_RUNNING;
685: break;
686: case EV_STOP:
687: /* channel disabled now free dma rings et al. */
688: mg->mg_channels[chan]->cc_state = CHAN_TRANSIENT;
689: musycc_free_channel(mg, chan);
690: return;
691: case EV_IDLE:
692: state = CHAN_IDLE;
693: break;
694: case EV_WATCHDOG:
695: musycc_reinit_dma(mg, chan);
696: return;
697: }
698:
699: mg->mg_channels[chan]->cc_state = state;
700: wakeup(mg->mg_channels[chan]);
701: }
702:
703: /*
704: * DMA handling functions
705: */
706:
707: struct dma_desc *
708: musycc_dma_get(struct musycc_group *mg)
709: {
710: struct dma_desc *dd;
711:
712: splassert(IPL_NET);
713:
714: if (mg->mg_freecnt == 0)
715: return (NULL);
716: mg->mg_freecnt--;
717: dd = mg->mg_freelist;
718: mg->mg_freelist = dd->nextdesc;
719: /* clear some important data */
720: dd->nextdesc = NULL;
721: dd->mbuf = NULL;
722:
723: return (dd);
724: }
725:
726: void
727: musycc_dma_free(struct musycc_group *mg, struct dma_desc *dd)
728: {
729: splassert(IPL_NET);
730:
731: dd->nextdesc = mg->mg_freelist;
732: mg->mg_freelist = dd;
733: mg->mg_freecnt++;
734: }
735:
/*
 * Initialize the transmit descriptors.  Actually they are left empty
 * until a packet comes in.
 */
/*
 * Build the transmit descriptor ring for channel `c' out of `size'
 * empty descriptors.  Buffers are attached later by musycc_encap();
 * tx_pend/tx_cur track the ring.  Returns 0 or ENOBUFS when the
 * descriptor pool runs dry (everything is unwound in that case).
 * Must be called at splnet.
 */
int
musycc_list_tx_init(struct musycc_group *mg, int c, int size)
{
	struct musycc_dma_data *md;
	struct dma_desc *dd;
	bus_addr_t base;
	int i;

	splassert(IPL_NET);
	ACCOOM_PRINTF(2, ("musycc_list_tx_init\n"));
	md = &mg->mg_dma_d[c];
	md->tx_pend = NULL;
	md->tx_cur = NULL;
	md->tx_cnt = size;
	md->tx_pkts = 0;

	base = mg->mg_listmap->dm_segs[0].ds_addr;
	for (i = 0; i < md->tx_cnt; i++) {
		dd = musycc_dma_get(mg);
		if (dd == NULL) {
			ACCOOM_PRINTF(0, ("musycc_list_tx_init: "
			    "out of dma_desc\n"));
			musycc_list_tx_free(mg, c);
			return (ENOBUFS);
		}
		dd->status = 0 /* MUSYCC_STATUS_NOPOLL */;
		dd->data = 0;
		if (md->tx_cur) {
			/* append; next holds the bus address of dd */
			md->tx_cur->nextdesc = dd;
			md->tx_cur->next = htole32(base + (caddr_t)dd -
			    mg->mg_listkva);
			md->tx_cur = dd;
		} else
			md->tx_pend = md->tx_cur = dd;
	}

	/* close the ring: last descriptor points back to the first */
	dd->nextdesc = md->tx_pend;
	dd->next = htole32(base + (caddr_t)md->tx_pend - mg->mg_listkva);
	md->tx_pend = dd;

	/* point the chip's tx head at the ring */
	mg->mg_group->tx_headp[c] = htole32(base + (caddr_t)dd -
	    mg->mg_listkva);

	bus_dmamap_sync(mg->mg_dmat, mg->mg_listmap, 0, MUSYCC_DMA_MAPSIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
788:
789:
790: /*
791: * Initialize the RX descriptors and allocate mbufs for them. Note that
792: * we arrange the descriptors in a closed ring, so that the last descriptor
793: * points back to the first.
794: */
795: int
796: musycc_list_rx_init(struct musycc_group *mg, int c, int size)
797: {
798: struct musycc_dma_data *md;
799: struct dma_desc *dd = NULL, *last;
800: bus_addr_t base;
801: int i;
802:
803: splassert(IPL_NET);
804: ACCOOM_PRINTF(2, ("musycc_list_rx_init\n"));
805: md = &mg->mg_dma_d[c];
806: md->rx_cnt = size;
807:
808: base = mg->mg_listmap->dm_segs[0].ds_addr;
809: for (i = 0; i < size; i++) {
810: dd = musycc_dma_get(mg);
811: if (dd == NULL) {
812: ACCOOM_PRINTF(0, ("musycc_list_rx_init: "
813: "out of dma_desc\n"));
814: musycc_list_rx_free(mg, c);
815: return (ENOBUFS);
816: }
817: if (musycc_newbuf(mg, dd, NULL) == ENOBUFS) {
818: ACCOOM_PRINTF(0, ("musycc_list_rx_init: "
819: "out of mbufs\n"));
820: musycc_list_rx_free(mg, c);
821: return (ENOBUFS);
822: }
823: if (md->rx_prod) {
824: md->rx_prod->nextdesc = dd;
825: md->rx_prod->next = htole32(base + (caddr_t)dd -
826: mg->mg_listkva);
827: md->rx_prod = dd;
828: } else
829: last = md->rx_prod = dd;
830: }
831:
832: dd->nextdesc = last;
833: dd->next = htole32(base + (caddr_t)last - mg->mg_listkva);
834:
835: mg->mg_group->rx_headp[c] = htole32(base + (caddr_t)dd -
836: mg->mg_listkva);
837:
838: bus_dmamap_sync(mg->mg_dmat, mg->mg_listmap, 0, MUSYCC_DMA_MAPSIZE,
839: BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
840:
841: return (0);
842: }
843:
844: void
845: musycc_list_tx_free(struct musycc_group *mg, int c)
846: {
847: struct musycc_dma_data *md;
848: struct dma_desc *dd, *tmp;
849:
850: md = &mg->mg_dma_d[c];
851:
852: splassert(IPL_NET);
853: ACCOOM_PRINTF(2, ("musycc_list_tx_free\n"));
854: dd = md->tx_pend;
855: do {
856: if (dd == NULL)
857: break;
858: if (dd->map->dm_nsegs != 0) {
859: bus_dmamap_t map = dd->map;
860:
861: bus_dmamap_unload(mg->mg_dmat, map);
862: }
863: if (dd->mbuf != NULL) {
864: m_freem(dd->mbuf);
865: dd->mbuf = NULL;
866: }
867: tmp = dd;
868: dd = dd->nextdesc;
869: musycc_dma_free(mg, tmp);
870: } while (dd != md->tx_pend);
871: md->tx_pend = md->tx_cur = NULL;
872: md->tx_cnt = md->tx_use = md->tx_pkts = 0;
873: }
874:
875: void
876: musycc_list_rx_free(struct musycc_group *mg, int c)
877: {
878: struct musycc_dma_data *md;
879: struct dma_desc *dd, *tmp;
880:
881: md = &mg->mg_dma_d[c];
882:
883: splassert(IPL_NET);
884: ACCOOM_PRINTF(2, ("musycc_list_rx_free\n"));
885: dd = md->rx_prod;
886: do {
887: if (dd == NULL)
888: break;
889: if (dd->map->dm_nsegs != 0) {
890: bus_dmamap_t map = dd->map;
891:
892: bus_dmamap_unload(mg->mg_dmat, map);
893: }
894: if (dd->mbuf != NULL) {
895: m_freem(dd->mbuf);
896: dd->mbuf = NULL;
897: }
898: tmp = dd;
899: dd = dd->nextdesc;
900: musycc_dma_free(mg, tmp);
901: } while (dd != md->rx_prod);
902: md->rx_prod = NULL;
903: md->rx_cnt = 0;
904: }
905:
/*
 * Only used by the watchdog timeout: tear down and rebuild both DMA
 * rings of channel `c' from scratch, then re-activate the channel.
 */
void
musycc_reinit_dma(struct musycc_group *mg, int c)
{
	int s;

	s = splnet();

	musycc_list_tx_free(mg, c);
	musycc_list_rx_free(mg, c);

	/* setup tx & rx DMA chain */
	if (musycc_list_tx_init(mg, c, MUSYCC_DMA_SIZE) ||
	    musycc_list_rx_init(mg, c, MUSYCC_DMA_SIZE)) {
		log(LOG_ERR, "%s: Failed to malloc memory\n",
		    mg->mg_channels[c]->cc_ifp->if_xname);
		musycc_free_channel(mg, c);
	}
	splx(s);

	/*
	 * NOTE(review): the channel is re-activated even when the ring
	 * rebuild above failed and musycc_free_channel() was called --
	 * looks suspicious, verify this is the intended behaviour.
	 */
	musycc_activate_channel(mg, c);
}
928:
/*
 * Initialize an RX descriptor and attach an mbuf cluster.  When `m' is
 * NULL a fresh cluster mbuf is allocated, otherwise `m' is recycled
 * and reset to cover its full cluster.  The descriptor takes over the
 * freshly loaded rx spare map; its previous map becomes the new spare.
 * Returns 0 or ENOBUFS.
 */
int
musycc_newbuf(struct musycc_group *mg, struct dma_desc *c, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	bus_dmamap_t map;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		/* recycle the passed-in mbuf, reset data to the cluster */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	if (bus_dmamap_load(mg->mg_dmat, mg->mg_rx_sparemap,
	    mtod(m_new, caddr_t), m_new->m_pkthdr.len, NULL,
	    BUS_DMA_NOWAIT) != 0) {
		ACCOOM_PRINTF(0, ("%s: rx load failed\n",
		    mg->mg_hdlc->mc_dev.dv_xname));
		m_freem(m_new);
		return (ENOBUFS);
	}
	/* swap the loaded spare map into the descriptor */
	map = c->map;
	c->map = mg->mg_rx_sparemap;
	mg->mg_rx_sparemap = map;

	bus_dmamap_sync(mg->mg_dmat, c->map, 0, c->map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	c->mbuf = m_new;
	c->data = htole32(c->map->dm_segs[0].ds_addr);
	c->status = htole32(MUSYCC_STATUS_NOPOLL |
	    MUSYCC_STATUS_LEN(m_new->m_pkthdr.len));

	/* flush the descriptor itself so the chip sees the update */
	bus_dmamap_sync(mg->mg_dmat, mg->mg_listmap,
	    ((caddr_t)c - mg->mg_listkva), sizeof(struct dma_desc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
981:
982: /*
983: * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
984: * pointers to the fragment pointers.
985: */
986: int
987: musycc_encap(struct musycc_group *mg, struct mbuf *m_head, int c)
988: {
989: struct dma_desc *cur, *tmp;
990: bus_dmamap_t map;
991: bus_addr_t base;
992: u_int32_t status;
993: int i;
994:
995: splassert(IPL_NET);
996:
997: map = mg->mg_tx_sparemap;
998: if (bus_dmamap_load_mbuf(mg->mg_dmat, map, m_head,
999: BUS_DMA_NOWAIT) != 0) {
1000: ACCOOM_PRINTF(0, ("%s: musycc_encap: dmamap_load failed\n",
1001: mg->mg_channels[c]->cc_ifp->if_xname));
1002: return (ENOBUFS);
1003: }
1004:
1005: cur = mg->mg_dma_d[c].tx_cur;
1006: base = mg->mg_listmap->dm_segs[0].ds_addr;
1007:
1008: if (map->dm_nsegs + mg->mg_dma_d[c].tx_use >= mg->mg_dma_d[c].tx_cnt) {
1009: ACCOOM_PRINTF(1, ("%s: tx out of dma bufs\n",
1010: mg->mg_channels[c]->cc_ifp->if_xname));
1011: return (ENOBUFS);
1012: }
1013:
1014: i = 0;
1015: while (i < map->dm_nsegs) {
1016: status = /* MUSYCC_STATUS_NOPOLL | */
1017: MUSYCC_STATUS_LEN(map->dm_segs[i].ds_len);
1018: if (cur != mg->mg_dma_d[c].tx_cur)
1019: status |= MUSYCC_STATUS_OWNER;
1020:
1021: cur->status = htole32(status);
1022: cur->data = htole32(map->dm_segs[i].ds_addr);
1023:
1024: bus_dmamap_sync(mg->mg_dmat, mg->mg_listmap,
1025: ((caddr_t)cur - mg->mg_listkva), sizeof(struct dma_desc),
1026: BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1027:
1028: if (++i >= map->dm_nsegs)
1029: break;
1030: cur = cur->nextdesc;
1031: }
1032:
1033: bus_dmamap_sync(mg->mg_dmat, map, 0, map->dm_mapsize,
1034: BUS_DMASYNC_PREWRITE);
1035:
1036: cur->mbuf = m_head;
1037: mg->mg_tx_sparemap = cur->map;
1038: cur->map = map;
1039: cur->status |= htole32(MUSYCC_STATUS_EOM);
1040: tmp = mg->mg_dma_d[c].tx_cur;
1041: mg->mg_dma_d[c].tx_cur = cur->nextdesc;
1042: mg->mg_dma_d[c].tx_use += i;
1043: mg->mg_dma_d[c].tx_pkts++;
1044:
1045: /*
1046: * Last but not least, flag the buffer if the buffer is flagged to
1047: * early, it may happen, that the buffer is already transmitted
1048: * before we changed all relevant variables.
1049: */
1050: tmp->status |= htole32(MUSYCC_STATUS_OWNER);
1051: #if 0
1052: /* check for transmited packets NO POLLING mode only */
1053: /*
1054: * Note: a bug in the HDLC chip seems to make it impossible to use
1055: * no polling mode.
1056: */
1057: musycc_txeom(mg, c);
1058: if (mg->mg_dma_d[c].tx_pend == tmp) {
1059: /* and restart as needed */
1060: printf("%s: tx needs kick\n",
1061: mg->mg_channels[c]->cc_ifp->if_xname);
1062: mg->mg_group->tx_headp[c] = htole32(base +
1063: (caddr_t)mg->mg_dma_d[c].tx_pend - mg->mg_listkva);
1064:
1065: musycc_sreq(mg, c, MUSYCC_SREQ_SET(8), MUSYCC_SREQ_TX);
1066: }
1067: #endif
1068:
1069: return (0);
1070: }
1071:
1072:
1073: /*
1074: * API towards the kernel
1075: */
1076:
1077: /* start transmit of new network buffer */
1078: void
1079: musycc_start(struct ifnet *ifp)
1080: {
1081: struct musycc_group *mg;
1082: struct channel_softc *cc;
1083: struct mbuf *m = NULL;
1084: int s;
1085:
1086: cc = ifp->if_softc;
1087: mg = cc->cc_group;
1088:
1089: ACCOOM_PRINTF(3, ("musycc_start\n"));
1090: if (cc->cc_state != CHAN_RUNNING)
1091: return;
1092: if (ifp->if_flags & IFF_OACTIVE)
1093: return;
1094: if (sppp_isempty(ifp))
1095: return;
1096:
1097: s = splnet();
1098: while ((m = sppp_pick(ifp)) != NULL) {
1099: if (musycc_encap(mg, m, cc->cc_channel)) {
1100: ifp->if_flags |= IFF_OACTIVE;
1101: break;
1102: }
1103:
1104: #if NBPFILTER > 0
1105: if (ifp->if_bpf)
1106: bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1107: #endif
1108:
1109: /* now we are committed to transmit the packet */
1110: sppp_dequeue(ifp);
1111: }
1112: splx(s);
1113:
1114: /*
1115: * Set a timeout in case the chip goes out to lunch.
1116: */
1117: ifp->if_timer = 5;
1118:
1119: return;
1120: }
1121:
1122:
1123: /*
1124: * Watchdog/transmission transmit timeout handler. Called when a
1125: * transmission is started on the interface, but no interrupt is
1126: * received before the timeout. This usually indicates that the
1127: * card has wedged for some reason.
1128: */
1129: void
1130: musycc_watchdog(struct ifnet *ifp)
1131: {
1132: struct channel_softc *cc = ifp->if_softc;
1133:
1134: log(LOG_ERR, "%s: device timeout\n", cc->cc_ifp->if_xname);
1135: ifp->if_oerrors++;
1136:
1137: musycc_sreq(cc->cc_group, cc->cc_channel, MUSYCC_SREQ_SET(9),
1138: MUSYCC_SREQ_BOTH, EV_WATCHDOG);
1139: }
1140:
1141:
1142: /*
1143: * Interrupt specific functions
1144: */
1145:
1146: /*
1147: * A frame has been uploaded: pass the resulting mbuf chain up to
1148: * the higher level protocols.
1149: */
/*
 * Walk the channel's RX descriptor ring starting at rx_prod, recycle
 * buffers and hand every completed frame to sppp.  If the whole ring
 * was consumed (or forcekick is set) the chip is restarted via SREQ.
 */
void
musycc_rxeom(struct musycc_group *mg, int channel, int forcekick)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct dma_desc *cur_rx, *start_rx;
	int total_len = 0, consumed = 0;
	u_int32_t rxstat;

	ACCOOM_PRINTF(3, ("musycc_rxeom\n"));

	ifp = mg->mg_channels[channel]->cc_ifp;

	start_rx = cur_rx = mg->mg_dma_d[channel].rx_prod;
	if (cur_rx == NULL)
		return;	/* dma ring got cleared */
	do {
		/* pick up the descriptor status written by the chip */
		bus_dmamap_sync(mg->mg_dmat, mg->mg_listmap,
		    ((caddr_t)cur_rx - mg->mg_listkva),
		    sizeof(struct dma_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		rxstat = letoh32(cur_rx->status);
		/*
		 * NOTE(review): on RX a set OWNER bit marks a completed
		 * buffer -- the polarity is the opposite of the TX ring
		 * (cf. musycc_txeom); confirm against the MUSYCC manual.
		 */
		if (!(rxstat & MUSYCC_STATUS_OWNER))
			break;

		m = cur_rx->mbuf;
		cur_rx->mbuf = NULL;
		total_len = MUSYCC_STATUS_LEN(rxstat);


		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & MUSYCC_STATUS_ERROR) {
			ifp->if_ierrors++;
			ACCOOM_PRINTF(1, ("%s: rx error %08x\n",
			    ifp->if_xname, rxstat));
			musycc_newbuf(mg, cur_rx, m);
			cur_rx = cur_rx->nextdesc;
			consumed++;
			continue;
		}

		/* No errors; receive the packet. */
		bus_dmamap_sync(mg->mg_dmat, cur_rx->map, 0,
		    cur_rx->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		/* attach a fresh buffer; on failure the frame is dropped */
		if (musycc_newbuf(mg, cur_rx, NULL) != 0) {
			cur_rx = cur_rx->nextdesc;
			consumed++;
			continue;
		}

		cur_rx = cur_rx->nextdesc;
		consumed++;

		/* TODO support mbuf chains */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
		ifp->if_ipackets++;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		/* pass it on. */
		sppp_input(ifp, m);
	} while (cur_rx != start_rx);

	mg->mg_dma_d[channel].rx_prod = cur_rx;

	/* a full wrap of the ring or a forced restart needs an SREQ kick */
	if ((cur_rx == start_rx && consumed) || forcekick) {
		/* send SREQ to signal the new buffers */
		ACCOOM_PRINTF(1, ("%s: rx kick, consumed %d pkts\n",
		    mg->mg_channels[channel]->cc_ifp->if_xname, consumed));
		mg->mg_group->rx_headp[channel] = htole32(
		    mg->mg_listmap->dm_segs[0].ds_addr +
		    (caddr_t)cur_rx - mg->mg_listkva);
		musycc_sreq(mg, channel, MUSYCC_SREQ_SET(8),
		    MUSYCC_SREQ_RX, EV_NULL);
	}
}
1236:
1237: /*
1238: * A frame was downloaded to the chip. It's safe for us to clean up
1239: * the list buffers.
1240: */
/*
 * Reclaim TX descriptors between tx_pend and tx_cur whose OWNER bit
 * the chip has cleared: unload their DMA maps, free the mbufs, and
 * restart the queue/engine as needed.
 */
void
musycc_txeom(struct musycc_group *mg, int channel, int forcekick)
{
	struct dma_desc *dd, *dd_pend;
	struct ifnet *ifp;

	ACCOOM_PRINTF(3, ("musycc_txeom\n"));

	ifp = mg->mg_channels[channel]->cc_ifp;
	/* Clear the watchdog timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (dd = mg->mg_dma_d[channel].tx_pend;
	    dd != mg->mg_dma_d[channel].tx_cur;
	    dd = dd->nextdesc) {
		/* pick up the descriptor status written by the chip */
		bus_dmamap_sync(mg->mg_dmat, mg->mg_listmap,
		    ((caddr_t)dd - mg->mg_listkva), sizeof(struct dma_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (letoh32(dd->status) & MUSYCC_STATUS_OWNER)
			/* musycc still owns this descriptor */
			break;

		mg->mg_dma_d[channel].tx_use--;

		dd->status = 0;	/* reinit dma status flags */
		/* dd->status |= MUSYCC_STATUS_NOPOLL; *//* disable polling */

		/* release the fragment map if this descriptor had one loaded */
		if (dd->map->dm_nsegs != 0) {
			bus_dmamap_sync(mg->mg_dmat, dd->map, 0,
			    dd->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(mg->mg_dmat, dd->map);
		}
		/* the mbuf hangs off the last (EOM) descriptor of a frame */
		if (dd->mbuf != NULL) {
			m_freem(dd->mbuf);
			dd->mbuf = NULL;
			mg->mg_dma_d[channel].tx_pkts--;
			ifp->if_opackets++;
		}
	}

	dd_pend = mg->mg_dma_d[channel].tx_pend;
	mg->mg_dma_d[channel].tx_pend = dd;

	/* descriptors were reclaimed, the queue may accept packets again */
	if (ifp->if_flags & IFF_OACTIVE && dd_pend != dd) {
		ifp->if_flags &= ~IFF_OACTIVE;
		musycc_start(ifp);
	}

	if (forcekick) {
		/* restart */
		ACCOOM_PRINTF(1, ("%s: tx kick forced\n",
		    mg->mg_channels[channel]->cc_ifp->if_xname));
		mg->mg_group->tx_headp[channel] =
		    htole32(mg->mg_listmap->dm_segs[0].ds_addr +
		    (caddr_t)mg->mg_dma_d[channel].tx_pend - mg->mg_listkva);

		musycc_sreq(mg, channel, MUSYCC_SREQ_SET(8), MUSYCC_SREQ_TX,
		    EV_NULL);
	}
}
1306:
/*
 * Main interrupt handler: drain the shared interrupt descriptor queue
 * and dispatch each entry first on its event code, then on its error
 * code.  Returns 1 if the interrupt was ours, 0 otherwise.
 */
int
musycc_intr(void *arg)
{
	struct musycc_softc *mc = arg;
	struct musycc_group *mg;
	struct ifnet *ifp;
	u_int32_t intstatus, id;
	int i, n, chan;

	intstatus = bus_space_read_4(mc->mc_st, mc->mc_sh, MUSYCC_INTRSTATUS);

	if (intstatus & MUSYCC_INTCNT_MASK) {
		/* entries pending: sync the interrupt queue for reading */
		bus_dmamap_sync(mc->mc_dmat, mc->mc_intrmap,
		    offsetof(struct musycc_intdesc, md_intrq[0]),
		    MUSYCC_INTLEN * sizeof(u_int32_t), BUS_DMASYNC_POSTREAD);

		ACCOOM_PRINTF(4, ("%s: interrupt status %08x\n",
		    mc->mc_dev.dv_xname, intstatus));

		/* n: first queue slot; INTCNT bits: number of entries */
		n = MUSYCC_NEXTINT_GET(intstatus);
		for (i = 0; i < (intstatus & MUSYCC_INTCNT_MASK); i++) {
			id = letoh32(mc->mc_intrd->md_intrq[(n + i) %
			    MUSYCC_INTLEN]);
			chan = MUSYCC_INTD_CHAN(id);
			mg = &mc->mc_groups[MUSYCC_INTD_GRP(id)];

			ACCOOM_PRINTF(4, ("%s: interrupt %s\n",
			    mc->mc_dev.dv_xname, musycc_intr_print(id)));

			if (id & MUSYCC_INTD_ILOST)
				ACCOOM_PRINTF(0, ("%s: interrupt lost\n",
				    mc->mc_dev.dv_xname));

			/* dispatch on the event code */
			switch (MUSYCC_INTD_EVENT(id)) {
			case MUSYCC_INTEV_NONE:
				break;
			case MUSYCC_INTEV_SACK:
				/*
				 * service request acknowledged: advance the
				 * state engine and issue the next queued
				 * SREQ, if any
				 */
				musycc_state_engine(mg, chan,
				    mg->mg_sreq[mg->mg_sreqpend].event);
				mg->mg_sreqpend =
				    (mg->mg_sreqpend + 1) & MUSYCC_SREQMASK;
				if (mg->mg_sreqpend != mg->mg_sreqprod)
					musycc_kick(mg);
				break;
			case MUSYCC_INTEV_EOM:
			case MUSYCC_INTEV_EOB:
				/* DIR set means transmit direction */
				if (id & MUSYCC_INTD_DIR)
					musycc_txeom(mg, chan, 0);
				else
					musycc_rxeom(mg, chan, 0);
				break;
			default:
				ACCOOM_PRINTF(0, ("%s: unhandled event: %s\n",
				    mc->mc_dev.dv_xname,
				    musycc_intr_print(id)));
				break;
			}
			/* dispatch on the error code */
			switch (MUSYCC_INTD_ERROR(id)) {
			case MUSYCC_INTERR_NONE:
				break;
			case MUSYCC_INTERR_COFA:
				if ((id & MUSYCC_INTD_DIR) == 0)
					/* ignore COFA for RX side */
					break;
				if (mg->mg_channels[chan]->cc_state !=
				    CHAN_RUNNING) {
					/*
					 * ignore COFA for TX side if card is
					 * not running
					 */
					break;
				}
				ACCOOM_PRINTF(0, ("%s: error: %s\n",
				    mc->mc_dev.dv_xname,
				    musycc_intr_print(id)));
#if 0
				/* digest already transmitted packets */
				musycc_txeom(mg, chan);

				/* adjust head pointer */
				musycc_dump_dma(mg);
				mg->mg_group->tx_headp[chan] =
				    htole32(mg->mg_listmap->dm_segs[0].ds_addr +
				    (caddr_t)mg->mg_dma_d[chan].tx_pend -
				    mg->mg_listkva);
				musycc_dump_dma(mg);

				musycc_sreq(mg, chan, MUSYCC_SREQ_SET(8),
				    MUSYCC_SREQ_TX, CHAN_RUNNING);
#endif
				break;
			case MUSYCC_INTERR_BUFF:
				/*
				 * log event as this should not happen,
				 * indicates PCI bus congestion
				 */
				log(LOG_ERR, "%s: internal FIFO %s\n",
				    mg->mg_channels[chan]->cc_ifp->if_xname,
				    id & MUSYCC_INTD_DIR ? "underflow" :
				    "overflow");

				/* digest queue and restarting dma engine */
				ifp = mg->mg_channels[chan]->cc_ifp;
				if (id & MUSYCC_INTD_DIR) {
					ifp->if_oerrors++;
					musycc_txeom(mg, chan, 1);
				} else {
					ifp->if_ierrors++;
					musycc_rxeom(mg, chan, 1);
				}
				break;
			case MUSYCC_INTERR_ONR:
				ACCOOM_PRINTF(0, ("%s: error: %s\n",
				    mc->mc_dev.dv_xname,
				    musycc_intr_print(id)));

				/* digest queue and restarting dma engine */
				ifp = mg->mg_channels[chan]->cc_ifp;
				if (id & MUSYCC_INTD_DIR) {
					ifp->if_oerrors++;
					musycc_txeom(mg, chan, 1);
				} else {
					ifp->if_ierrors++;
					musycc_rxeom(mg, chan, 1);
				}
				break;
			case MUSYCC_INTERR_OOF:
				/* ignore */
				break;
			default:
				ACCOOM_PRINTF(0, ("%s: unhandled error: %s\n",
				    mc->mc_dev.dv_xname,
				    musycc_intr_print(id)));
				break;
			}
		}
		/* acknowledge the processed entries to the chip */
		bus_space_write_4(mc->mc_st, mc->mc_sh, MUSYCC_INTRSTATUS,
		    MUSYCC_NEXTINT_SET((n + i) % MUSYCC_INTLEN));
		bus_space_barrier(mc->mc_st, mc->mc_sh, MUSYCC_INTRSTATUS,
		    sizeof(u_int32_t), BUS_SPACE_BARRIER_WRITE);
		return (1);
	} else
		return (0);
}
1451:
1452: void
1453: musycc_kick(struct musycc_group *mg)
1454: {
1455:
1456: bus_dmamap_sync(mg->mg_dmat, mg->mg_hdlc->mc_cfgmap,
1457: MUSYCC_GROUPBASE(mg->mg_gnum), sizeof(struct musycc_grpdesc),
1458: BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1459:
1460: ACCOOM_PRINTF(4, ("musycc_kick: group %d sreq[%d] req %08x\n",
1461: mg->mg_gnum, mg->mg_sreqpend, mg->mg_sreq[mg->mg_sreqpend].sreq));
1462:
1463: bus_space_write_4(mg->mg_hdlc->mc_st, mg->mg_hdlc->mc_sh,
1464: MUSYCC_SERREQ(mg->mg_gnum), mg->mg_sreq[mg->mg_sreqpend].sreq);
1465: bus_space_barrier(mg->mg_hdlc->mc_st, mg->mg_hdlc->mc_sh,
1466: MUSYCC_SERREQ(mg->mg_gnum), sizeof(u_int32_t),
1467: BUS_SPACE_BARRIER_WRITE);
1468: }
1469:
1470: void
1471: musycc_sreq(struct musycc_group *mg, int channel, u_int32_t req, int dir,
1472: enum musycc_event event)
1473: {
1474: #define MUSYCC_SREQINC(x, y) \
1475: do { \
1476: (x) = ((x) + 1) & MUSYCC_SREQMASK; \
1477: if (x == y) \
1478: panic("%s: sreq queue overflow", \
1479: mg->mg_hdlc->mc_dev.dv_xname); \
1480: } while (0)
1481:
1482: struct timeval tv;
1483: int needskick;
1484:
1485: needskick = (mg->mg_sreqpend == mg->mg_sreqprod);
1486: getmicrouptime(&tv);
1487:
1488: ACCOOM_PRINTF(4, ("musycc_sreq: g# %d c# %d req %x dir %x\n",
1489: mg->mg_gnum, channel, req, dir));
1490:
1491: if (dir & MUSYCC_SREQ_RX) {
1492: req &= ~MUSYCC_SREQ_TXDIR & ~MUSYCC_SREQ_MASK;
1493: req |= MUSYCC_SREQ_CHSET(channel);
1494: mg->mg_sreq[mg->mg_sreqprod].sreq = req;
1495: mg->mg_sreq[mg->mg_sreqprod].timeout = tv.tv_sec +
1496: MUSYCC_SREQTIMEOUT;
1497: if (dir == MUSYCC_SREQ_RX)
1498: mg->mg_sreq[mg->mg_sreqprod].event = event;
1499: else
1500: mg->mg_sreq[mg->mg_sreqprod].event = EV_NULL;
1501: MUSYCC_SREQINC(mg->mg_sreqprod, mg->mg_sreqpend);
1502: }
1503: if (dir & MUSYCC_SREQ_TX) {
1504: req &= ~MUSYCC_SREQ_MASK;
1505: req |= MUSYCC_SREQ_TXDIR;
1506: req |= MUSYCC_SREQ_CHSET(channel);
1507: mg->mg_sreq[mg->mg_sreqprod].timeout = tv.tv_sec +
1508: MUSYCC_SREQTIMEOUT;
1509: mg->mg_sreq[mg->mg_sreqprod].sreq = req;
1510: mg->mg_sreq[mg->mg_sreqprod].event = event;
1511: MUSYCC_SREQINC(mg->mg_sreqprod, mg->mg_sreqpend);
1512: }
1513:
1514: if (needskick)
1515: musycc_kick(mg);
1516:
1517: #undef MUSYCC_SREQINC
1518: }
1519:
1520: void
1521: musycc_tick(struct channel_softc *cc)
1522: {
1523: struct musycc_group *mg = cc->cc_group;
1524: struct timeval tv;
1525:
1526: if (mg->mg_sreqpend == mg->mg_sreqprod)
1527: return;
1528:
1529: getmicrouptime(&tv);
1530: if (mg->mg_sreq[mg->mg_sreqpend].timeout < tv.tv_sec) {
1531: log(LOG_ERR, "%s: service request timeout\n",
1532: cc->cc_ifp->if_xname);
1533: mg->mg_sreqpend++;
1534: /* digest all timed out SREQ */
1535: while (mg->mg_sreq[mg->mg_sreqpend].timeout < tv.tv_sec &&
1536: mg->mg_sreqpend != mg->mg_sreqprod)
1537: mg->mg_sreqpend++;
1538:
1539: if (mg->mg_sreqpend != mg->mg_sreqprod)
1540: musycc_kick(mg);
1541: }
1542: }
1543:
1544: /*
1545: * Extension Bus API
1546: */
/*
 * Extension bus interrupt handler stub: nothing is serviced here yet,
 * the interrupt is only logged.  Always claims the interrupt.
 */
int
ebus_intr(void *arg)
{
	struct musycc_softc *sc = arg;

	printf("%s: interrupt\n", sc->mc_dev.dv_xname);
	return (1);
}
1555:
1556: int
1557: ebus_attach_device(struct ebus_dev *e, struct musycc_softc *mc,
1558: bus_size_t offset, bus_size_t size)
1559: {
1560: struct musycc_softc *ec = mc->mc_other;
1561:
1562: e->base = offset << 2;
1563: e->size = size;
1564: e->st = ec->mc_st;
1565: return (bus_space_subregion(ec->mc_st, ec->mc_sh, offset << 2,
1566: size, &e->sh));
1567: }
1568:
1569: u_int8_t
1570: ebus_read(struct ebus_dev *e, bus_size_t offset)
1571: {
1572: u_int8_t value;
1573:
1574: value = bus_space_read_1(e->st, e->sh, offset << 2);
1575: bus_space_barrier(e->st, e->sh, 0, e->size,
1576: BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);
1577: return (value);
1578: }
1579:
/*
 * Write one byte to an extension bus device register.  Registers are
 * spaced on 32bit boundaries, hence the offset shift.
 */
void
ebus_write(struct ebus_dev *e, bus_size_t offset, u_int8_t value)
{
	bus_space_write_1(e->st, e->sh, offset << 2, value);
	/* order this access against other ebus accesses */
	bus_space_barrier(e->st, e->sh, 0, e->size,
	    BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);
}
1587:
1588: void
1589: ebus_read_buf(struct ebus_dev *rom, bus_size_t offset, void *buf, size_t size)
1590: {
1591: u_int8_t *b = buf;
1592: size_t i;
1593:
1594: for (i = 0; i < size; i++)
1595: b[i] = ebus_read(rom, offset + i);
1596: }
1597:
1598: void
1599: ebus_set_led(struct channel_softc *cc, int on, u_int8_t value)
1600: {
1601: struct musycc_softc *sc = cc->cc_group->mg_hdlc->mc_other;
1602:
1603: value &= MUSYCC_LED_MASK; /* don't write to other ports led */
1604: value <<= cc->cc_group->mg_gnum * 2;
1605:
1606: if (on)
1607: sc->mc_ledstate |= value;
1608: else
1609: sc->mc_ledstate &= ~value;
1610:
1611: bus_space_write_1(sc->mc_st, sc->mc_sh, sc->mc_ledbase,
1612: sc->mc_ledstate);
1613: bus_space_barrier(sc->mc_st, sc->mc_sh, sc->mc_ledbase, 1,
1614: BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);
1615: }
1616:
1617: /*
1618: * Channel API
1619: */
1620:
/*
 * Hook a channel up to the kernel network stack: initialize its sppp
 * interface, set the driver entry points and attach it (plus bpf).
 * `if_ioctl' is supplied by the caller.
 */
void
musycc_attach_sppp(struct channel_softc *cc,
    int (*if_ioctl)(struct ifnet *, u_long, caddr_t))
{
	struct ifnet *ifp;

	ifp = &cc->cc_ppp.pp_if;
	cc->cc_ifp = ifp;

	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_mtu = PP_MTU;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST /* | IFF_SIMPLEX */;
	/* default to cisco hdlc keepalive mode; 3 bytes framing overhead */
	cc->cc_ppp.pp_flags |= PP_CISCO;
	cc->cc_ppp.pp_flags |= PP_KEEPALIVE;
	cc->cc_ppp.pp_framebytes = 3;

	ifp->if_ioctl = if_ioctl;
	ifp->if_start = musycc_start;
	ifp->if_watchdog = musycc_watchdog;

	/* attach order matters: ifnet first, then address, then sppp */
	if_attach(ifp);
	if_alloc_sadl(ifp);
	sppp_attach(ifp);
#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_PPP, PPP_HEADER_LEN);
#endif /* NBPFILTER > 0 */

}
1650:
1651: struct channel_softc *
1652: musycc_channel_create(const char *name, u_int8_t locked)
1653: {
1654: struct channel_softc *cc;
1655:
1656: cc = malloc(sizeof(*cc), M_DEVBUF, M_NOWAIT);
1657: if (!cc)
1658: return (NULL);
1659: bzero(cc, sizeof(*cc));
1660:
1661: cc->cc_state = CHAN_FLOAT;
1662: cc->cc_locked = locked;
1663:
1664: /* set default timeslot map for E1 */
1665: cc->cc_tslots = 0xfffffffe; /* all but timeslot 0 */
1666: strlcpy(cc->cc_ppp.pp_if.if_xname, name,
1667: sizeof(cc->cc_ppp.pp_if.if_xname));
1668:
1669: cc->cc_ppp.pp_if.if_softc = cc;
1670:
1671: return (cc);
1672: }
1673:
1674: int
1675: musycc_channel_attach(struct musycc_softc *mc, struct channel_softc *cc,
1676: struct device *dev, u_int8_t gnum)
1677: {
1678: struct musycc_group *mg;
1679: int i;
1680:
1681: if (cc->cc_state != CHAN_FLOAT)
1682: return (-1); /* already attached */
1683:
1684: if (gnum >= mc->mc_ngroups) {
1685: ACCOOM_PRINTF(0, ("%s: %s tries to attach to nonexistent group",
1686: mc->mc_dev.dv_xname, cc->cc_ifp->if_xname));
1687: return (-1);
1688: }
1689:
1690: mg = &mc->mc_groups[gnum];
1691: for (i = 0; i < MUSYCC_NUMCHAN; i++)
1692: if (mg->mg_channels[i] == NULL) {
1693: mg->mg_channels[i] = cc;
1694: cc->cc_state = CHAN_IDLE;
1695: cc->cc_group = mg;
1696: cc->cc_channel = i;
1697: cc->cc_parent = dev;
1698: return (i);
1699: }
1700: return (-1);
1701: }
1702:
1703: void
1704: musycc_channel_detach(struct ifnet *ifp)
1705: {
1706: struct channel_softc *cc = ifp->if_softc;
1707:
1708: if (cc->cc_state != CHAN_FLOAT) {
1709: musycc_free_channel(cc->cc_group, cc->cc_channel);
1710: cc->cc_group->mg_channels[cc->cc_channel] = NULL;
1711: }
1712:
1713: if_detach(ifp);
1714: }
1715:
1716: #ifdef ACCOOM_DEBUG
/* textual names for the interrupt event codes (MUSYCC_INTD_EVENT) */
const char *musycc_events[] = {
	"NONE", "SACK", "EOB", "EOM", "EOP", "CHABT", "CHIC", "FREC",
	"SINC", "SDEC", "SFILT", "RFU", "RFU", "RFU", "RFU", "RFU"
};
/* textual names for the interrupt error codes (MUSYCC_INTD_ERROR) */
const char *musycc_errors[] = {
	"NONE", "BUFF", "COFA", "ONR", "PROT", "RFU", "RFU", "RFU",
	"OOF", "FCS", "ALIGN", "ABT", "LNG", "SHT", "SUERR", "PERR"
};
/* channel protocol names, indexed by MUSYCC_CHAN_PROTO_GET() */
const char *mu_proto[] = {
	"trans", "ss7", "hdlc16", "hdlc32", "rsvd4", "rsvd5", "rsvd6", "rsvd7"
};
/* port mode names, indexed by the MUSYCC_PORT_MODEMASK bits */
const char *mu_mode[] = {
	"t1", "e1", "2*e1", "4*e1", "n64", "rsvd5", "rsvd6", "rsvd7"
};

/* scratch buffer for musycc_intr_print(); debug-only, not re-entrant */
char musycc_intrbuf[48];
1733:
/*
 * Format interrupt descriptor `id' into a human readable string.
 * Returns a pointer to the static musycc_intrbuf, so the result is
 * only valid until the next call (debug code, not re-entrant).
 */
char *
musycc_intr_print(u_int32_t id)
{
	snprintf(musycc_intrbuf, sizeof(musycc_intrbuf),
	    "ev %s er %s grp %d chan %d dir %s",
	    musycc_events[MUSYCC_INTD_EVENT(id)],
	    musycc_errors[MUSYCC_INTD_ERROR(id)],
	    MUSYCC_INTD_GRP(id), MUSYCC_INTD_CHAN(id),
	    id & MUSYCC_INTD_DIR ? "T" : "R");
	return (musycc_intrbuf);
}
1745:
/*
 * Debug: dump the in-memory (host copy) group descriptor -- global,
 * group and port configuration, timeslot maps, channel configuration
 * and finally the DMA head pointers.  Output only if `level' is at or
 * below the current accoom_debug verbosity.
 */
void
musycc_dump_group(int level, struct musycc_group *mg)
{
	struct musycc_grpdesc *md = mg->mg_group;
	u_int32_t d;
	int i;

	if (level > accoom_debug)
		return;

	printf("%s: dumping group %d\n",
	    mg->mg_hdlc->mc_dev.dv_xname, mg->mg_gnum);
	printf("===========================================================\n");
	printf("global conf: %08x\n", letoh32(md->global_conf));
	d = letoh32(md->group_conf);
	printf("group conf: [%08x] %s %s %s int %s%s inhib BSD %s%s poll %d\n",
	    d,
	    d & MUSYCC_GRCFG_TXENBL ? "TX" : "",
	    d & MUSYCC_GRCFG_RXENBL ? "RX" : "",
	    d & MUSYCC_GRCFG_SUBDSBL ? "" : "SUB",
	    d & MUSYCC_GRCFG_MSKOOF ? "" : "O",
	    d & MUSYCC_GRCFG_MSKCOFA ? "" : "C",
	    d & MUSYCC_GRCFG_INHTBSD ? "TX" : "",
	    d & MUSYCC_GRCFG_INHRBSD ? "RX" : "",
	    (d & MUSYCC_GRCFG_POLL64) == MUSYCC_GRCFG_POLL64 ? 64 :
	    d & MUSYCC_GRCFG_POLL32 ? 32 :
	    d & MUSYCC_GRCFG_POLL16 ? 16 : 1);
	d = letoh32(md->port_conf);
	printf("port conf: [%08x] %s %s %s %s %s %s %s\n", d,
	    mu_mode[d & MUSYCC_PORT_MODEMASK],
	    d & MUSYCC_PORT_TDAT_EDGE ? "TXE" : "!TXE",
	    d & MUSYCC_PORT_TSYNC_EDGE ? "TXS" : "!TXS",
	    d & MUSYCC_PORT_RDAT_EDGE ? "RXE" : "!RXE",
	    d & MUSYCC_PORT_RSYNC_EDGE ? "RXS" : "!RXS",
	    d & MUSYCC_PORT_ROOF_EDGE ? "ROOF" : "!ROOF",
	    d & MUSYCC_PORT_TRITX ? "!tri-state" : "tri-state");
	printf("message len 1: %d 2: %d\n",
	    letoh32(md->msglen_conf) & MUSYCC_MAXFRM_MASK,
	    (letoh32(md->msglen_conf) >> MUSYCC_MAXFRM2_SHIFT) &
	    MUSYCC_MAXFRM_MASK);
	printf("interrupt queue %x len %d\n", letoh32(md->int_queuep),
	    letoh32(md->int_queuelen));
	printf("memory protection %x\n", letoh32(md->memprot));
	printf("===========================================================\n");
	/* per-timeslot map: C=enabled, S=subchannel, *=56k, [nn]=channel */
	printf("Timeslot Map:TX\t\tRX\n");
	for (i = 0; i < 128; i++) {
		if (md->tx_tsmap[i] & MUSYCC_TSLOT_ENABLED)
			printf("%d: %s%s%s[%02d]\t\t", i,
			    md->tx_tsmap[i] & MUSYCC_TSLOT_ENABLED ? "C" : " ",
			    md->tx_tsmap[i] & MUSYCC_TSLOT_SUB ? "S" : " ",
			    md->tx_tsmap[i] & MUSYCC_TSLOT_56K ? "*" : " ",
			    MUSYCC_TSLOT_CHAN(md->tx_tsmap[i]));
		else if (md->rx_tsmap[i] & MUSYCC_TSLOT_ENABLED)
			printf("%d: \t\t", i);
		if (md->rx_tsmap[i] & MUSYCC_TSLOT_ENABLED)
			printf("%s%s%s[%02d]\n",
			    md->rx_tsmap[i] & MUSYCC_TSLOT_ENABLED ? "C" : " ",
			    md->rx_tsmap[i] & MUSYCC_TSLOT_SUB ? "S" : " ",
			    md->rx_tsmap[i] & MUSYCC_TSLOT_56K ? "*" : " ",
			    MUSYCC_TSLOT_CHAN(md->rx_tsmap[i]));
		else
			printf("\n");
	}
	printf("===========================================================\n");
	printf("Channel config:\nTX\t\t\tRX\n");
	for (i = 0; i < 32; i++)
		if (md->tx_cconf[i] != 0) {
			d = letoh32(md->tx_cconf[i]);
			printf("%s%s%s%s%s%s%s %s [%x]\t",
			    d & MUSYCC_CHAN_MSKBUFF ? "B" : " ",
			    d & MUSYCC_CHAN_MSKEOM ? "E" : " ",
			    d & MUSYCC_CHAN_MSKMSG ? "M" : " ",
			    d & MUSYCC_CHAN_MSKIDLE ? "I" : " ",
			    d & MUSYCC_CHAN_FCS ? "F" : "",
			    d & MUSYCC_CHAN_MAXLEN1 ? "1" : "",
			    d & MUSYCC_CHAN_MAXLEN2 ? "2" : "",
			    mu_proto[MUSYCC_CHAN_PROTO_GET(d)],
			    d);
			d = letoh32(md->rx_cconf[i]);
			printf("%s%s%s%s%s%s%s %s [%x]\n",
			    d & MUSYCC_CHAN_MSKBUFF ? "B" : " ",
			    d & MUSYCC_CHAN_MSKEOM ? "E" : " ",
			    d & MUSYCC_CHAN_MSKMSG ? "M" : " ",
			    d & MUSYCC_CHAN_MSKIDLE ? "I" : " ",
			    d & MUSYCC_CHAN_FCS ? "F" : "",
			    d & MUSYCC_CHAN_MAXLEN1 ? "1" : "",
			    d & MUSYCC_CHAN_MAXLEN2 ? "2" : "",
			    mu_proto[MUSYCC_CHAN_PROTO_GET(d)],
			    d);
		}
	printf("===========================================================\n");
	/* dir 0: only print the DMA head/message pointers, no rings */
	musycc_dump_dma(level, mg, 0);
}
1839:
/*
 * Debug: like musycc_dump_group(), but read the group descriptor back
 * from the chip via bus_space instead of the host copy.  The numeric
 * offsets are the group-relative register locations; presumably they
 * match the fields of struct musycc_grpdesc -- confirm against
 * musyccreg.h.
 */
void
musycc_dump_desc(int level, struct musycc_group *mg)
{
/* read one 32bit word at group-relative offset x from the chip */
#define READ4(x) \
	bus_space_read_4(mg->mg_hdlc->mc_st, mg->mg_hdlc->mc_sh, \
	    MUSYCC_GROUPBASE(mg->mg_gnum) + (x))
	u_int32_t w;
	u_int8_t c1, c2;
	int i;

	if (level > accoom_debug)
		return;

	printf("%s: dumping descriptor %d at %p kva %08x + %x dma %08x\n",
	    mg->mg_hdlc->mc_dev.dv_xname, mg->mg_gnum, mg->mg_group,
	    mg->mg_hdlc->mc_cfgmap->dm_segs[0].ds_addr,
	    MUSYCC_GROUPBASE(mg->mg_gnum), READ4(0));
	printf("===========================================================\n");
	printf("global conf: %08x\n", READ4(MUSYCC_GLOBALCONF));
	w = READ4(0x060c);
	printf("group conf: [%08x] %s %s %s int %s%s inhib BSD %s%s poll %d\n",
	    w, w & MUSYCC_GRCFG_TXENBL ? "TX" : "",
	    w & MUSYCC_GRCFG_RXENBL ? "RX" : "",
	    w & MUSYCC_GRCFG_SUBDSBL ? "" : "SUB",
	    w & MUSYCC_GRCFG_MSKOOF ? "" : "O",
	    w & MUSYCC_GRCFG_MSKCOFA ? "" : "C",
	    w & MUSYCC_GRCFG_INHTBSD ? "TX" : "",
	    w & MUSYCC_GRCFG_INHRBSD ? "RX" : "",
	    (w & MUSYCC_GRCFG_POLL64) == MUSYCC_GRCFG_POLL64 ? 64 :
	    w & MUSYCC_GRCFG_POLL32 ? 32 :
	    w & MUSYCC_GRCFG_POLL16 ? 16 : 1);
	w = READ4(0x0618);
	printf("port conf: [%08x] %s %s %s %s %s %s %s\n", w,
	    mu_mode[w & MUSYCC_PORT_MODEMASK],
	    w & MUSYCC_PORT_TDAT_EDGE ? "TXE" : "!TXE",
	    w & MUSYCC_PORT_TSYNC_EDGE ? "TXS" : "!TXS",
	    w & MUSYCC_PORT_RDAT_EDGE ? "RXE" : "!RXE",
	    w & MUSYCC_PORT_RSYNC_EDGE ? "RXS" : "!RXS",
	    w & MUSYCC_PORT_ROOF_EDGE ? "ROOF" : "!ROOF",
	    w & MUSYCC_PORT_TRITX ? "!tri-state" : "tri-state");
	w = READ4(0x0614);
	printf("message len 1: %d 2: %d\n",
	    w & MUSYCC_MAXFRM_MASK,
	    (w >> MUSYCC_MAXFRM2_SHIFT) & MUSYCC_MAXFRM_MASK);
	printf("interrupt queue %x len %d\n", READ4(0x0604), READ4(0x0608));
	printf("memory protection %x\n", READ4(0x0610));
	printf("===========================================================\n");
	/* per-timeslot map: C=enabled, S=subchannel, *=56k, [nn]=channel */
	printf("Timeslot Map:TX\t\tRX\n");
	for (i = 0; i < 128; i++) {
		c1 = bus_space_read_1(mg->mg_hdlc->mc_st, mg->mg_hdlc->mc_sh,
		    MUSYCC_GROUPBASE(mg->mg_gnum) + 0x0200 + i);
		c2 = bus_space_read_1(mg->mg_hdlc->mc_st, mg->mg_hdlc->mc_sh,
		    MUSYCC_GROUPBASE(mg->mg_gnum) + 0x0400 + i);
		if (c1 & MUSYCC_TSLOT_ENABLED)
			printf("%d: %s%s%s[%02d]\t\t", i,
			    c1 & MUSYCC_TSLOT_ENABLED ? "C" : " ",
			    c1 & MUSYCC_TSLOT_SUB ? "S" : " ",
			    c1 & MUSYCC_TSLOT_56K ? "*" : " ",
			    MUSYCC_TSLOT_CHAN(c1));
		else if (c2 & MUSYCC_TSLOT_ENABLED)
			printf("%d: \t\t", i);
		if (c2 & MUSYCC_TSLOT_ENABLED)
			printf("%s%s%s[%02d]\n",
			    c2 & MUSYCC_TSLOT_ENABLED ? "C" : " ",
			    c2 & MUSYCC_TSLOT_SUB ? "S" : " ",
			    c2 & MUSYCC_TSLOT_56K ? "*" : " ",
			    MUSYCC_TSLOT_CHAN(c2));
		else
			printf("\n");
	}
	printf("===========================================================\n");
	printf("Channel config:\nTX\t\t\t\tRX\n");
	for (i = 0; i < 32; i++) {
		w = READ4(0x0380 + i * 4);
		if (w != 0) {
			printf("%s%s%s%s%s%s%s %s [%08x]\t",
			    w & MUSYCC_CHAN_MSKBUFF ? "B" : " ",
			    w & MUSYCC_CHAN_MSKEOM ? "E" : " ",
			    w & MUSYCC_CHAN_MSKMSG ? "M" : " ",
			    w & MUSYCC_CHAN_MSKIDLE ? "I" : " ",
			    w & MUSYCC_CHAN_FCS ? "F" : "",
			    w & MUSYCC_CHAN_MAXLEN1 ? "1" : "",
			    w & MUSYCC_CHAN_MAXLEN2 ? "2" : "",
			    mu_proto[MUSYCC_CHAN_PROTO_GET(w)],
			    w);
			w = READ4(0x0580 + i * 4);
			printf("%s%s%s%s%s%s%s %s [%08x]\n",
			    w & MUSYCC_CHAN_MSKBUFF ? "B" : " ",
			    w & MUSYCC_CHAN_MSKEOM ? "E" : " ",
			    w & MUSYCC_CHAN_MSKMSG ? "M" : " ",
			    w & MUSYCC_CHAN_MSKIDLE ? "I" : " ",
			    w & MUSYCC_CHAN_FCS ? "F" : "",
			    w & MUSYCC_CHAN_MAXLEN1 ? "1" : "",
			    w & MUSYCC_CHAN_MAXLEN2 ? "2" : "",
			    mu_proto[MUSYCC_CHAN_PROTO_GET(w)],
			    w);
		}
	}
	printf("===========================================================\n");
	/* dir 0: only print the DMA head/message pointers, no rings */
	musycc_dump_dma(level, mg, 0);

}
1942:
1943: void
1944: musycc_dump_dma(int level, struct musycc_group *mg, int dir)
1945: {
1946: struct musycc_grpdesc *md = mg->mg_group;
1947: struct dma_desc *dd;
1948: bus_addr_t base, addr;
1949: int i;
1950:
1951: if (level > accoom_debug)
1952: return;
1953:
1954: printf("DMA Pointers:\n%8s %8s %8s %8s\n",
1955: "tx head", "tx msg", "rx head", "rx msg");
1956: for (i = 0; i < 32; i++) {
1957: if (md->tx_headp[i] == 0 && md->rx_headp[i] == 0)
1958: continue;
1959: printf("%08x %08x %08x %08x\n",
1960: md->tx_headp[i], md->tx_msgp[i],
1961: md->rx_headp[i], md->rx_msgp[i]);
1962: }
1963:
1964: base = mg->mg_listmap->dm_segs[0].ds_addr;
1965: for (i = 0; dir & MUSYCC_SREQ_TX && i < 32; i++) {
1966: if (md->tx_headp[i] == 0)
1967: continue;
1968:
1969: printf("==================================================\n");
1970: printf("TX DMA Ring for channel %d\n", i);
1971: printf("pend: %p cur: %p cnt: %d use: %d pkgs: %d\n",
1972: mg->mg_dma_d[i].tx_pend, mg->mg_dma_d[i].tx_cur,
1973: mg->mg_dma_d[i].tx_cnt, mg->mg_dma_d[i].tx_use,
1974: mg->mg_dma_d[i].tx_pkts);
1975: printf(" %10s %8s %8s %8s %8s %10s\n",
1976: "addr", "paddr", "next", "status", "data", "mbuf");
1977: dd = mg->mg_dma_d[i].tx_pend;
1978: do {
1979: addr = htole32(base + ((caddr_t)dd - mg->mg_listkva));
1980: printf("%s %p %08x %08x %08x %08x %p\n",
1981: dd == mg->mg_dma_d[i].tx_pend ? ">" :
1982: dd == mg->mg_dma_d[i].tx_cur ? "*" : " ",
1983: dd, addr, dd->next, dd->status,
1984: dd->data, dd->mbuf);
1985: dd = dd->nextdesc;
1986: } while (dd != mg->mg_dma_d[i].tx_pend);
1987: }
1988: for (i = 0; dir & MUSYCC_SREQ_RX && i < 32; i++) {
1989: if (md->rx_headp[i] == 0)
1990: continue;
1991:
1992: printf("==================================================\n");
1993: printf("RX DMA Ring for channel %d\n", i);
1994: printf("prod: %p cnt: %d\n",
1995: mg->mg_dma_d[i].rx_prod, mg->mg_dma_d[i].rx_cnt);
1996: printf(" %8s %8s %8s %8s %10s\n",
1997: "addr", "paddr", "next", "status", "data", "mbuf");
1998: dd = mg->mg_dma_d[i].rx_prod;
1999: do {
2000: addr = htole32(base + ((caddr_t)dd - mg->mg_listkva));
2001: printf("%p %08x %08x %08x %08x %p\n", dd, addr,
2002: dd->next, dd->status, dd->data, dd->mbuf);
2003: dd = dd->nextdesc;
2004: } while (dd != mg->mg_dma_d[i].rx_prod);
2005: }
2006: }
2007: #endif
CVSweb