Annotation of sys/dev/pci/if_stge.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: if_stge.c,v 1.35 2006/12/29 22:16:05 kettenis Exp $ */
2: /* $NetBSD: if_stge.c,v 1.27 2005/05/16 21:35:32 bouyer Exp $ */
3:
4: /*-
5: * Copyright (c) 2001 The NetBSD Foundation, Inc.
6: * All rights reserved.
7: *
8: * This code is derived from software contributed to The NetBSD Foundation
9: * by Jason R. Thorpe.
10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
39:
40: /*
41: * Device driver for the Sundance Tech. TC9021 10/100/1000
42: * Ethernet controller.
43: */
44:
45: #include "bpfilter.h"
46: #include "vlan.h"
47:
48: #include <sys/param.h>
49: #include <sys/systm.h>
50: #include <sys/timeout.h>
51: #include <sys/mbuf.h>
52: #include <sys/malloc.h>
53: #include <sys/kernel.h>
54: #include <sys/socket.h>
55: #include <sys/ioctl.h>
56: #include <sys/errno.h>
57: #include <sys/device.h>
58: #include <sys/queue.h>
59:
60: #include <net/if.h>
61: #include <net/if_dl.h>
62:
63: #ifdef INET
64: #include <netinet/in.h>
65: #include <netinet/in_systm.h>
66: #include <netinet/in_var.h>
67: #include <netinet/ip.h>
68: #include <netinet/if_ether.h>
69: #endif
70:
71: #include <net/if_media.h>
72:
73: #if NVLAN > 0
74: #include <net/if_types.h>
75: #include <net/if_vlan_var.h>
76: #endif
77:
78: #if NBPFILTER > 0
79: #include <net/bpf.h>
80: #endif
81:
82: #include <machine/bus.h>
83: #include <machine/intr.h>
84:
85: #include <dev/mii/mii.h>
86: #include <dev/mii/miivar.h>
87: #include <dev/mii/mii_bitbang.h>
88:
89: #include <dev/pci/pcireg.h>
90: #include <dev/pci/pcivar.h>
91: #include <dev/pci/pcidevs.h>
92:
93: #include <dev/pci/if_stgereg.h>
94:
95: void stge_start(struct ifnet *);
96: void stge_watchdog(struct ifnet *);
97: int stge_ioctl(struct ifnet *, u_long, caddr_t);
98: int stge_init(struct ifnet *);
99: void stge_stop(struct ifnet *, int);
100:
101: void stge_shutdown(void *);
102:
103: void stge_reset(struct stge_softc *);
104: void stge_rxdrain(struct stge_softc *);
105: int stge_add_rxbuf(struct stge_softc *, int);
106: void stge_read_eeprom(struct stge_softc *, int, uint16_t *);
107: void stge_tick(void *);
108:
109: void stge_stats_update(struct stge_softc *);
110:
111: void stge_set_filter(struct stge_softc *);
112:
113: int stge_intr(void *);
114: void stge_txintr(struct stge_softc *);
115: void stge_rxintr(struct stge_softc *);
116:
117: int stge_mii_readreg(struct device *, int, int);
118: void stge_mii_writereg(struct device *, int, int, int);
119: void stge_mii_statchg(struct device *);
120:
121: int stge_mediachange(struct ifnet *);
122: void stge_mediastatus(struct ifnet *, struct ifmediareq *);
123:
124: int stge_match(struct device *, void *, void *);
125: void stge_attach(struct device *, struct device *, void *);
126:
/*
 * When non-zero, received packets small enough to fit in a single
 * header mbuf are copied out of the cluster into a fresh mbuf
 * (see stge_rxintr), trading a copy for cluster memory.  Off by
 * default; patchable/tunable.
 */
int	stge_copy_small = 0;
128:
/* Autoconf attachment glue: softc size, match and attach entry points. */
struct cfattach stge_ca = {
	sizeof(struct stge_softc), stge_match, stge_attach,
};
132:
133: struct cfdriver stge_cd = {
134: 0, "stge", DV_IFNET
135: };
136:
uint32_t stge_mii_bitbang_read(struct device *);
void	stge_mii_bitbang_write(struct device *, uint32_t);

/*
 * Glue for the generic MII bit-bang code: read/write accessors for
 * the PhyCtrl register, plus the PhyCtrl bits that drive and sample
 * the MDIO/MDC serial management lines.  Note MgmtData serves as
 * both MDO and MDI; MgmtDir selects the direction.
 */
const struct mii_bitbang_ops stge_mii_bitbang_ops = {
	stge_mii_bitbang_read,
	stge_mii_bitbang_write,
	{
		PC_MgmtData,		/* MII_BIT_MDO */
		PC_MgmtData,		/* MII_BIT_MDI */
		PC_MgmtClk,		/* MII_BIT_MDC */
		PC_MgmtDir,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
151:
/*
 * Devices supported by this driver (PCI vendor/product ID pairs,
 * matched by stge_match() via pci_matchbyid()).
 */
const struct pci_matchid stge_devices[] = {
	{ PCI_VENDOR_SUNDANCE,	PCI_PRODUCT_SUNDANCE_ST1023 },
	{ PCI_VENDOR_SUNDANCE,	PCI_PRODUCT_SUNDANCE_ST2021 },
	{ PCI_VENDOR_TAMARACK,	PCI_PRODUCT_TAMARACK_TC9021 },
	{ PCI_VENDOR_TAMARACK,	PCI_PRODUCT_TAMARACK_TC9021_ALT },
	/*
	 * The Sundance sample boards use the Sundance vendor ID,
	 * but the Tamarack product ID.
	 */
	{ PCI_VENDOR_SUNDANCE,	PCI_PRODUCT_SUNDANCE_TC9021 },
	{ PCI_VENDOR_SUNDANCE,	PCI_PRODUCT_SUNDANCE_TC9021_ALT },
	{ PCI_VENDOR_DLINK,	PCI_PRODUCT_DLINK_DGE550T },
	{ PCI_VENDOR_ANTARES,	PCI_PRODUCT_ANTARES_TC9021 }
};
169:
170: int
171: stge_match(struct device *parent, void *match, void *aux)
172: {
173: return (pci_matchbyid((struct pci_attach_args *)aux, stge_devices,
174: sizeof(stge_devices) / sizeof(stge_devices[0])));
175: }
176:
177: void
178: stge_attach(struct device *parent, struct device *self, void *aux)
179: {
180: struct stge_softc *sc = (struct stge_softc *) self;
181: struct pci_attach_args *pa = aux;
182: struct ifnet *ifp = &sc->sc_arpcom.ac_if;
183: pci_chipset_tag_t pc = pa->pa_pc;
184: pci_intr_handle_t ih;
185: const char *intrstr = NULL;
186: bus_space_tag_t iot, memt;
187: bus_space_handle_t ioh, memh;
188: bus_dma_segment_t seg;
189: bus_size_t iosize;
190: int ioh_valid, memh_valid;
191: int i, rseg, error;
192: pcireg_t pmode;
193: int pmreg;
194:
195: timeout_set(&sc->sc_timeout, stge_tick, sc);
196:
197: sc->sc_rev = PCI_REVISION(pa->pa_class);
198:
199: /*
200: * Map the device.
201: */
202: ioh_valid = (pci_mapreg_map(pa, STGE_PCI_IOBA,
203: PCI_MAPREG_TYPE_IO, 0,
204: &iot, &ioh, NULL, &iosize, 0) == 0);
205: memh_valid = (pci_mapreg_map(pa, STGE_PCI_MMBA,
206: PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
207: &memt, &memh, NULL, &iosize, 0) == 0);
208:
209: if (memh_valid) {
210: sc->sc_st = memt;
211: sc->sc_sh = memh;
212: } else if (ioh_valid) {
213: sc->sc_st = iot;
214: sc->sc_sh = ioh;
215: } else {
216: printf(": unable to map device registers\n");
217: return;
218: }
219:
220: sc->sc_dmat = pa->pa_dmat;
221:
222: /* Get it out of power save mode if needed. */
223: if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
224: pmode = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
225: PCI_PMCSR_STATE_MASK;
226: if (pmode == PCI_PMCSR_STATE_D3) {
227: /*
228: * The card has lost all configuration data in
229: * this state, so punt.
230: */
231: printf(": unable to wake up from power state D3\n");
232: return;
233: }
234: if (pmode != 0) {
235: printf(": waking up from power state D%d\n", pmode);
236: pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
237: PCI_PMCSR_STATE_D0);
238: }
239: }
240:
241: /*
242: * Map and establish our interrupt.
243: */
244: if (pci_intr_map(pa, &ih)) {
245: printf(": unable to map interrupt\n");
246: goto fail_0;
247: }
248: intrstr = pci_intr_string(pc, ih);
249: sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, stge_intr, sc,
250: sc->sc_dev.dv_xname);
251: if (sc->sc_ih == NULL) {
252: printf(": unable to establish interrupt");
253: if (intrstr != NULL)
254: printf(" at %s", intrstr);
255: printf("\n");
256: goto fail_0;
257: }
258: printf(": %s", intrstr);
259:
260: /*
261: * Allocate the control data structures, and create and load the
262: * DMA map for it.
263: */
264: if ((error = bus_dmamem_alloc(sc->sc_dmat,
265: sizeof(struct stge_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
266: 0)) != 0) {
267: printf("%s: unable to allocate control data, error = %d\n",
268: sc->sc_dev.dv_xname, error);
269: goto fail_0;
270: }
271:
272: if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
273: sizeof(struct stge_control_data), (caddr_t *)&sc->sc_control_data,
274: BUS_DMA_COHERENT)) != 0) {
275: printf("%s: unable to map control data, error = %d\n",
276: sc->sc_dev.dv_xname, error);
277: goto fail_1;
278: }
279:
280: if ((error = bus_dmamap_create(sc->sc_dmat,
281: sizeof(struct stge_control_data), 1,
282: sizeof(struct stge_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
283: printf("%s: unable to create control data DMA map, "
284: "error = %d\n", sc->sc_dev.dv_xname, error);
285: goto fail_2;
286: }
287:
288: if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
289: sc->sc_control_data, sizeof(struct stge_control_data), NULL,
290: 0)) != 0) {
291: printf("%s: unable to load control data DMA map, error = %d\n",
292: sc->sc_dev.dv_xname, error);
293: goto fail_3;
294: }
295:
296: /*
297: * Create the transmit buffer DMA maps. Note that rev B.3
298: * and earlier seem to have a bug regarding multi-fragment
299: * packets. We need to limit the number of Tx segments on
300: * such chips to 1.
301: */
302: for (i = 0; i < STGE_NTXDESC; i++) {
303: if ((error = bus_dmamap_create(sc->sc_dmat,
304: STGE_JUMBO_FRAMELEN, STGE_NTXFRAGS, MCLBYTES, 0, 0,
305: &sc->sc_txsoft[i].ds_dmamap)) != 0) {
306: printf("%s: unable to create tx DMA map %d, "
307: "error = %d\n", sc->sc_dev.dv_xname, i, error);
308: goto fail_4;
309: }
310: }
311:
312: /*
313: * Create the receive buffer DMA maps.
314: */
315: for (i = 0; i < STGE_NRXDESC; i++) {
316: if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
317: MCLBYTES, 0, 0, &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
318: printf("%s: unable to create rx DMA map %d, "
319: "error = %d\n", sc->sc_dev.dv_xname, i, error);
320: goto fail_5;
321: }
322: sc->sc_rxsoft[i].ds_mbuf = NULL;
323: }
324:
325: /*
326: * Determine if we're copper or fiber. It affects how we
327: * reset the card.
328: */
329: if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
330: sc->sc_usefiber = 1;
331: else
332: sc->sc_usefiber = 0;
333:
334: /*
335: * Reset the chip to a known state.
336: */
337: stge_reset(sc);
338:
339: /*
340: * Reading the station address from the EEPROM doesn't seem
341: * to work, at least on my sample boards. Instead, since
342: * the reset sequence does AutoInit, read it from the station
343: * address registers. For Sundance 1023 you can only read it
344: * from EEPROM.
345: */
346: if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_SUNDANCE_ST1023) {
347: sc->sc_arpcom.ac_enaddr[0] = CSR_READ_2(sc,
348: STGE_StationAddress0) & 0xff;
349: sc->sc_arpcom.ac_enaddr[1] = CSR_READ_2(sc,
350: STGE_StationAddress0) >> 8;
351: sc->sc_arpcom.ac_enaddr[2] = CSR_READ_2(sc,
352: STGE_StationAddress1) & 0xff;
353: sc->sc_arpcom.ac_enaddr[3] = CSR_READ_2(sc,
354: STGE_StationAddress1) >> 8;
355: sc->sc_arpcom.ac_enaddr[4] = CSR_READ_2(sc,
356: STGE_StationAddress2) & 0xff;
357: sc->sc_arpcom.ac_enaddr[5] = CSR_READ_2(sc,
358: STGE_StationAddress2) >> 8;
359: sc->sc_stge1023 = 0;
360: } else {
361: uint16_t myaddr[ETHER_ADDR_LEN / 2];
362: for (i = 0; i <ETHER_ADDR_LEN / 2; i++) {
363: stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
364: &myaddr[i]);
365: myaddr[i] = letoh16(myaddr[i]);
366: }
367: (void)memcpy(sc->sc_arpcom.ac_enaddr, myaddr,
368: sizeof(sc->sc_arpcom.ac_enaddr));
369: sc->sc_stge1023 = 1;
370: }
371:
372: printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
373:
374: /*
375: * Read some important bits from the PhyCtrl register.
376: */
377: sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
378: (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);
379:
380: /*
381: * Initialize our media structures and probe the MII.
382: */
383: sc->sc_mii.mii_ifp = ifp;
384: sc->sc_mii.mii_readreg = stge_mii_readreg;
385: sc->sc_mii.mii_writereg = stge_mii_writereg;
386: sc->sc_mii.mii_statchg = stge_mii_statchg;
387: ifmedia_init(&sc->sc_mii.mii_media, 0, stge_mediachange,
388: stge_mediastatus);
389: mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
390: MII_OFFSET_ANY, MIIF_DOPAUSE);
391: if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
392: ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
393: ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
394: } else
395: ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
396:
397: ifp = &sc->sc_arpcom.ac_if;
398: strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
399: ifp->if_softc = sc;
400: ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
401: ifp->if_ioctl = stge_ioctl;
402: ifp->if_start = stge_start;
403: ifp->if_watchdog = stge_watchdog;
404: #ifdef STGE_JUMBO
405: ifp->if_hardmtu = STGE_JUMBO_MTU;
406: #endif
407: IFQ_SET_MAXLEN(&ifp->if_snd, STGE_NTXDESC - 1);
408: IFQ_SET_READY(&ifp->if_snd);
409:
410: ifp->if_capabilities = IFCAP_VLAN_MTU;
411:
412: /*
413: * The manual recommends disabling early transmit, so we
414: * do. It's disabled anyway, if using IP checksumming,
415: * since the entire packet must be in the FIFO in order
416: * for the chip to perform the checksum.
417: */
418: sc->sc_txthresh = 0x0fff;
419:
420: /*
421: * Disable MWI if the PCI layer tells us to.
422: */
423: sc->sc_DMACtrl = 0;
424: #ifdef fake
425: if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0)
426: sc->sc_DMACtrl |= DMAC_MWIDisable;
427: #endif
428:
429: #ifdef STGE_CHECKSUM
430: /*
431: * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
432: */
433: sc->sc_arpcom.ac_if.if_capabilities |= IFCAP_CSUM_IPv4 |
434: IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
435: #endif
436:
437: /*
438: * Attach the interface.
439: */
440: if_attach(ifp);
441: ether_ifattach(ifp);
442:
443: /*
444: * Make sure the interface is shutdown during reboot.
445: */
446: sc->sc_sdhook = shutdownhook_establish(stge_shutdown, sc);
447: if (sc->sc_sdhook == NULL)
448: printf("%s: WARNING: unable to establish shutdown hook\n",
449: sc->sc_dev.dv_xname);
450: return;
451:
452: /*
453: * Free any resources we've allocated during the failed attach
454: * attempt. Do this in reverse order and fall through.
455: */
456: fail_5:
457: for (i = 0; i < STGE_NRXDESC; i++) {
458: if (sc->sc_rxsoft[i].ds_dmamap != NULL)
459: bus_dmamap_destroy(sc->sc_dmat,
460: sc->sc_rxsoft[i].ds_dmamap);
461: }
462: fail_4:
463: for (i = 0; i < STGE_NTXDESC; i++) {
464: if (sc->sc_txsoft[i].ds_dmamap != NULL)
465: bus_dmamap_destroy(sc->sc_dmat,
466: sc->sc_txsoft[i].ds_dmamap);
467: }
468: bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
469: fail_3:
470: bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
471: fail_2:
472: bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
473: sizeof(struct stge_control_data));
474: fail_1:
475: bus_dmamem_free(sc->sc_dmat, &seg, rseg);
476: fail_0:
477: bus_space_unmap(sc->sc_st, sc->sc_sh, iosize);
478: return;
479: }
480:
481: /*
482: * stge_shutdown:
483: *
484: * Make sure the interface is stopped at reboot time.
485: */
486: void
487: stge_shutdown(void *arg)
488: {
489: struct stge_softc *sc = arg;
490:
491: stge_stop(&sc->sc_arpcom.ac_if, 1);
492: }
493:
494: static void
495: stge_dma_wait(struct stge_softc *sc)
496: {
497: int i;
498:
499: for (i = 0; i < STGE_TIMEOUT; i++) {
500: delay(2);
501: if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
502: break;
503: }
504:
505: if (i == STGE_TIMEOUT)
506: printf("%s: DMA wait timed out\n", sc->sc_dev.dv_xname);
507: }
508:
509: /*
510: * stge_start: [ifnet interface function]
511: *
512: * Start packet transmission on the interface.
513: */
514: void
515: stge_start(struct ifnet *ifp)
516: {
517: struct stge_softc *sc = ifp->if_softc;
518: struct mbuf *m0;
519: struct stge_descsoft *ds;
520: struct stge_tfd *tfd;
521: bus_dmamap_t dmamap;
522: int error, firsttx, nexttx, opending, seg, totlen;
523: uint64_t csum_flags = 0;
524:
525: if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
526: return;
527:
528: /*
529: * Remember the previous number of pending transmissions
530: * and the first descriptor we will use.
531: */
532: opending = sc->sc_txpending;
533: firsttx = STGE_NEXTTX(sc->sc_txlast);
534:
535: /*
536: * Loop through the send queue, setting up transmit descriptors
537: * until we drain the queue, or use up all available transmit
538: * descriptors.
539: */
540: for (;;) {
541: /*
542: * Grab a packet off the queue.
543: */
544: IFQ_POLL(&ifp->if_snd, m0);
545: if (m0 == NULL)
546: break;
547:
548: /*
549: * Leave one unused descriptor at the end of the
550: * list to prevent wrapping completely around.
551: */
552: if (sc->sc_txpending == (STGE_NTXDESC - 1))
553: break;
554:
555: /*
556: * Get the last and next available transmit descriptor.
557: */
558: nexttx = STGE_NEXTTX(sc->sc_txlast);
559: tfd = &sc->sc_txdescs[nexttx];
560: ds = &sc->sc_txsoft[nexttx];
561:
562: dmamap = ds->ds_dmamap;
563:
564: /*
565: * Load the DMA map. If this fails, the packet either
566: * didn't fit in the alloted number of segments, or we
567: * were short on resources. For the too-many-segments
568: * case, we simply report an error and drop the packet,
569: * since we can't sanely copy a jumbo packet to a single
570: * buffer.
571: */
572: error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
573: BUS_DMA_NOWAIT);
574: if (error) {
575: if (error == EFBIG) {
576: printf("%s: Tx packet consumes too many "
577: "DMA segments (%u), dropping...\n",
578: sc->sc_dev.dv_xname, dmamap->dm_nsegs);
579: IFQ_DEQUEUE(&ifp->if_snd, m0);
580: m_freem(m0);
581: continue;
582: }
583: /*
584: * Short on resources, just stop for now.
585: */
586: break;
587: }
588:
589: IFQ_DEQUEUE(&ifp->if_snd, m0);
590:
591: /*
592: * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
593: */
594:
595: /* Sync the DMA map. */
596: bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
597: BUS_DMASYNC_PREWRITE);
598:
599: /* Initialize the fragment list. */
600: for (totlen = 0, seg = 0; seg < dmamap->dm_nsegs; seg++) {
601: tfd->tfd_frags[seg].frag_word0 =
602: htole64(FRAG_ADDR(dmamap->dm_segs[seg].ds_addr) |
603: FRAG_LEN(dmamap->dm_segs[seg].ds_len));
604: totlen += dmamap->dm_segs[seg].ds_len;
605: }
606:
607: #ifdef STGE_CHECKSUM
608: /*
609: * Initialize checksumming flags in the descriptor.
610: * Byte-swap constants so the compiler can optimize.
611: */
612: if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
613: csum_flags |= TFD_IPChecksumEnable;
614:
615: if (m0->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
616: csum_flags |= TFD_TCPChecksumEnable;
617: else if (m0->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
618: csum_flags |= TFD_UDPChecksumEnable;
619: #endif
620:
621: /*
622: * Initialize the descriptor and give it to the chip.
623: */
624: tfd->tfd_control = htole64(TFD_FrameId(nexttx) |
625: TFD_WordAlign(/*totlen & */3) |
626: TFD_FragCount(seg) | csum_flags |
627: (((nexttx & STGE_TXINTR_SPACING_MASK) == 0) ?
628: TFD_TxDMAIndicate : 0));
629:
630: /* Sync the descriptor. */
631: STGE_CDTXSYNC(sc, nexttx,
632: BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
633:
634: /*
635: * Kick the transmit DMA logic.
636: */
637: CSR_WRITE_4(sc, STGE_DMACtrl,
638: sc->sc_DMACtrl | DMAC_TxDMAPollNow);
639:
640: /*
641: * Store a pointer to the packet so we can free it later.
642: */
643: ds->ds_mbuf = m0;
644:
645: /* Advance the tx pointer. */
646: sc->sc_txpending++;
647: sc->sc_txlast = nexttx;
648:
649: #if NBPFILTER > 0
650: /*
651: * Pass the packet to any BPF listeners.
652: */
653: if (ifp->if_bpf)
654: bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
655: #endif /* NBPFILTER > 0 */
656: }
657:
658: if (sc->sc_txpending == (STGE_NTXDESC - 1)) {
659: /* No more slots left; notify upper layer. */
660: ifp->if_flags |= IFF_OACTIVE;
661: }
662:
663: if (sc->sc_txpending != opending) {
664: /*
665: * We enqueued packets. If the transmitter was idle,
666: * reset the txdirty pointer.
667: */
668: if (opending == 0)
669: sc->sc_txdirty = firsttx;
670:
671: /* Set a watchdog timer in case the chip flakes out. */
672: ifp->if_timer = 5;
673: }
674: }
675:
676: /*
677: * stge_watchdog: [ifnet interface function]
678: *
679: * Watchdog timer handler.
680: */
681: void
682: stge_watchdog(struct ifnet *ifp)
683: {
684: struct stge_softc *sc = ifp->if_softc;
685:
686: /*
687: * Sweep up first, since we don't interrupt every frame.
688: */
689: stge_txintr(sc);
690: if (sc->sc_txpending != 0) {
691: printf("%s: device timeout\n", sc->sc_dev.dv_xname);
692: ifp->if_oerrors++;
693:
694: (void) stge_init(ifp);
695:
696: /* Try to get more packets going. */
697: stge_start(ifp);
698: }
699: }
/*
 * stge_ioctl: [ifnet interface function]
 *
 *	Handle control requests from the operator.  Runs at splnet for
 *	the duration of the request; on every exit path an stge_start()
 *	is attempted to pick up any packets queued meanwhile.
 */
int
stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct stge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error;

	s = splnet();

	/*
	 * Let the common Ethernet ioctl handler have first crack; a
	 * positive return means it fully handled (or rejected) the
	 * request and we are done.
	 */
	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		/* Try to get more packets going. */
		stge_start(ifp);

		splx(s);
		return (error);
	}

	switch (cmd) {
	case SIOCSIFADDR:
		/* Setting an address implicitly brings the interface up. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			stge_init(ifp);

#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC bit changed while running,
			 * just reprogram the RX filter instead of doing
			 * a full reinit.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->stge_if_flags) &
			     IFF_PROMISC) {
				stge_set_filter(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					stge_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				stge_stop(ifp, 1);
		}
		/* Remember flags so the next delta can be computed. */
		sc->stge_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				stge_set_filter(sc);
			error = 0;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ENOTTY;
	}

	/* Try to get more packets going. */
	stge_start(ifp);

	splx(s);
	return (error);
}
/*
 * stge_intr:
 *
 *	Interrupt service routine.  Reads STGE_IntStatusAck (which
 *	appears to also acknowledge the pending causes — confirm with
 *	the TC9021 manual) in a loop and dispatches to the RX/TX/stats
 *	helpers.  Any fatal condition sets `wantinit', which breaks the
 *	loop and triggers a full reinit.  Returns 1 if the interrupt
 *	was ours, 0 otherwise.
 */
int
stge_intr(void *arg)
{
	struct stge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t txstat;
	int wantinit;
	uint16_t isr;

	/* Not our interrupt? */
	if ((CSR_READ_2(sc, STGE_IntStatus) & IS_InterruptStatus) == 0)
		return (0);

	for (wantinit = 0; wantinit == 0;) {
		isr = CSR_READ_2(sc, STGE_IntStatusAck);
		if ((isr & sc->sc_IntEnable) == 0)
			break;

		/* Host interface errors. */
		if (isr & IS_HostError) {
			printf("%s: Host interface error\n",
			    sc->sc_dev.dv_xname);
			wantinit = 1;
			continue;
		}

		/* Receive interrupts. */
		if (isr & (IS_RxDMAComplete|IS_RFDListEnd)) {
			stge_rxintr(sc);
			if (isr & IS_RFDListEnd) {
				printf("%s: receive ring overflow\n",
				    sc->sc_dev.dv_xname);
				/*
				 * XXX Should try to recover from this
				 * XXX more gracefully.
				 */
				wantinit = 1;
			}
		}

		/* Transmit interrupts. */
		if (isr & (IS_TxDMAComplete|IS_TxComplete))
			stge_txintr(sc);

		/* Statistics overflow. */
		if (isr & IS_UpdateStats)
			stge_stats_update(sc);

		/* Transmission errors. */
		if (isr & IS_TxComplete) {
			/* Drain the TxStatus FIFO one entry at a time. */
			for (;;) {
				txstat = CSR_READ_4(sc, STGE_TxStatus);
				if ((txstat & TS_TxComplete) == 0)
					break;
				if (txstat & TS_TxUnderrun) {
					/*
					 * Raise the early-Tx threshold to
					 * avoid future underruns (capped
					 * at the maximum, 0x0fff).
					 */
					sc->sc_txthresh++;
					if (sc->sc_txthresh > 0x0fff)
						sc->sc_txthresh = 0x0fff;
					printf("%s: transmit underrun, new "
					    "threshold: %d bytes\n",
					    sc->sc_dev.dv_xname,
					    sc->sc_txthresh << 5);
				}
				if (txstat & TS_MaxCollisions)
					printf("%s: excessive collisions\n",
					    sc->sc_dev.dv_xname);
			}
			/* A Tx error always forces a reinit. */
			wantinit = 1;
		}

	}

	if (wantinit)
		stge_init(ifp);

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/* Try to get more packets going. */
	stge_start(ifp);

	return (1);
}
/*
 * stge_txintr:
 *
 *	Helper; handle transmit interrupts.  Walks the ring from the
 *	oldest in-flight descriptor, reclaiming every frame the chip
 *	has marked TFD_TFDDone: syncs and unloads its DMA map and
 *	frees the mbuf.  Stops at the first still-pending descriptor.
 */
void
stge_txintr(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct stge_descsoft *ds;
	uint64_t control;
	int i;

	/* We may have freed descriptors; allow transmission again. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txdirty; sc->sc_txpending != 0;
	     i = STGE_NEXTTX(i), sc->sc_txpending--) {
		ds = &sc->sc_txsoft[i];

		/* Sync the descriptor before inspecting it. */
		STGE_CDTXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		control = letoh64(sc->sc_txdescs[i].tfd_control);
		if ((control & TFD_TFDDone) == 0)
			break;

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txdirty = i;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;
}
/*
 * stge_rxintr:
 *
 *	Helper; handle receive interrupts.  Walks the RX ring from
 *	sc_rxptr, assembling multi-fragment frames on the softc's
 *	rx chain (sc_rxhead/sc_rxtailp) until an RFD_FrameEnd fragment
 *	completes the packet, then validates it and passes it up the
 *	stack.  sc_rxdiscard is set when a packet must be dropped
 *	mid-assembly so the remaining fragments can be skipped.
 */
void
stge_rxintr(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct stge_descsoft *ds;
	struct mbuf *m, *tailm;
	uint64_t status;
	int i, len;

	for (i = sc->sc_rxptr;; i = STGE_NEXTRX(i)) {
		ds = &sc->sc_rxsoft[i];

		STGE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = letoh64(sc->sc_rxdescs[i].rfd_status);

		/* Stop at the first descriptor the chip still owns. */
		if ((status & RFD_RFDDone) == 0)
			break;

		/* Discarding the rest of an error'd packet? */
		if (__predict_false(sc->sc_rxdiscard)) {
			STGE_INIT_RXDESC(sc, i);
			if (status & RFD_FrameEnd) {
				/* Reset our state. */
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = ds->ds_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (stge_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			STGE_INIT_RXDESC(sc, i);
			if ((status & RFD_FrameEnd) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			STGE_RXCHAIN_RESET(sc);
			continue;
		}

#ifdef DIAGNOSTIC
		/* A FrameStart fragment must begin a fresh chain. */
		if (status & RFD_FrameStart) {
			KASSERT(sc->sc_rxhead == NULL);
			KASSERT(sc->sc_rxtailp == &sc->sc_rxhead);
		}
#endif

		STGE_RXCHAIN_LINK(sc, m);

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & RFD_FrameEnd) == 0) {
			sc->sc_rxlen += m->m_len;
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		tailm = sc->sc_rxtail;

		STGE_RXCHAIN_RESET(sc);

		/*
		 * If the packet had an error, drop it.  Note we
		 * count the error later in the periodic stats update.
		 */
		if (status & (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
		    RFD_RxAlignmentError | RFD_RxFCSError |
		    RFD_RxLengthError)) {
			m_freem(m);
			continue;
		}

		/*
		 * No errors.
		 *
		 * Note we have configured the chip to not include
		 * the CRC at the end of the packet.
		 */
		len = RFD_RxDMAFrameLen(status);
		/* Last fragment's length = total minus what came before. */
		tailm->m_len = len - sc->sc_rxlen;

		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 */
		if (stge_copy_small != 0 && len <= (MHLEN - 2)) {
			struct mbuf *nm;
			MGETHDR(nm, M_DONTWAIT, MT_DATA);
			if (nm == NULL) {
				ifp->if_ierrors++;
				m_freem(m);
				continue;
			}
			/* Offset by 2 to keep the payload aligned. */
			nm->m_data += 2;
			nm->m_pkthdr.len = nm->m_len = len;
			m_copydata(m, 0, len, mtod(nm, caddr_t));
			m_freem(m);
			m = nm;
		}

		/*
		 * Set the incoming checksum information for the packet.
		 */
		if (status & RFD_IPDetected) {
			if (!(status & RFD_IPError))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if ((status & RFD_TCPDetected) &&
			    (!(status & RFD_TCPError)))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
			else if ((status & RFD_UDPDetected) &&
			    (!(status & RFD_UDPError)))
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass if up the stack if it's for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		ether_input_mbuf(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}
1088: }
1089:
1090: /*
1091: * stge_tick:
1092: *
1093: * One second timer, used to tick the MII.
1094: */
1095: void
1096: stge_tick(void *arg)
1097: {
1098: struct stge_softc *sc = arg;
1099: int s;
1100:
1101: s = splnet();
1102: mii_tick(&sc->sc_mii);
1103: stge_stats_update(sc);
1104: splx(s);
1105:
1106: timeout_add(&sc->sc_timeout, hz);
1107: }
/*
 * stge_stats_update:
 *
 *	Read the TC9021 statistics counters and fold them into the
 *	interface counters.
 *
 *	NOTE(review): the (void)-discarded reads below appear to exist
 *	only to advance/clear the corresponding hardware counters —
 *	confirm against the TC9021 manual before reordering or removing
 *	any of these register accesses.
 */
void
stge_stats_update(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	(void) CSR_READ_4(sc, STGE_OctetRcvOk);

	ifp->if_ipackets +=
	    CSR_READ_4(sc, STGE_FramesRcvdOk);

	ifp->if_ierrors +=
	    (u_int) CSR_READ_2(sc, STGE_FramesLostRxErrors);

	(void) CSR_READ_4(sc, STGE_OctetXmtdOk);

	ifp->if_opackets +=
	    CSR_READ_4(sc, STGE_FramesXmtdOk);

	ifp->if_collisions +=
	    CSR_READ_4(sc, STGE_LateCollisions) +
	    CSR_READ_4(sc, STGE_MultiColFrames) +
	    CSR_READ_4(sc, STGE_SingleColFrames);

	ifp->if_oerrors +=
	    (u_int) CSR_READ_2(sc, STGE_FramesAbortXSColls) +
	    (u_int) CSR_READ_2(sc, STGE_FramesWEXDeferal);
}
1141:
/*
 * stge_reset:
 *
 *	Perform a soft reset on the TC9021.
 *
 *	Resets the whole ASIC (DMA, FIFOs, network and host interfaces)
 *	and polls AsicCtrl until the reset completes.  A failure to
 *	complete is reported on the console but not returned.
 */
void
stge_reset(struct stge_softc *sc)
{
	uint32_t ac;
	int i;

	ac = CSR_READ_4(sc, STGE_AsicCtrl);

	/*
	 * Only assert RstOut if we're fiber.  We need GMII clocks
	 * to be present in order for the reset to complete on fiber
	 * cards.
	 */
	CSR_WRITE_4(sc, STGE_AsicCtrl,
	    ac | AC_GlobalReset | AC_RxReset | AC_TxReset |
	    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
	    (sc->sc_usefiber ? AC_RstOut : 0));

	/* Give the reset time to take hold before we start polling. */
	delay(50000);

	/* Poll for completion, 5ms per probe, up to STGE_TIMEOUT probes. */
	for (i = 0; i < STGE_TIMEOUT; i++) {
		delay(5000);
		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
			break;
	}

	if (i == STGE_TIMEOUT)
		printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);

	/* Let the chip settle before it is touched again. */
	delay(1000);
}
1178:
/*
 * stge_init:		[ ifnet interface function ]
 *
 *	Initialize the interface.  Must be called at splnet().
 *
 *	Returns 0 on success, or an errno if a receive buffer could not
 *	be allocated, in which case the interface is left non-running.
 */
int
stge_init(struct ifnet *ifp)
{
	struct stge_softc *sc = ifp->if_softc;
	struct stge_descsoft *ds;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	stge_stop(ifp, 0);

	/*
	 * Reset the chip to a known state.
	 */
	stge_reset(sc);

	/*
	 * Initialize the transmit descriptor ring.  Each descriptor is
	 * pre-linked to its successor (wrapping at the end) and marked
	 * TFDDone so the chip will not transmit stale contents.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < STGE_NTXDESC; i++) {
		sc->sc_txdescs[i].tfd_next = htole64(
		    STGE_CDTXADDR(sc, STGE_NEXTTX(i)));
		sc->sc_txdescs[i].tfd_control = htole64(TFD_TFDDone);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = STGE_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.  Slots that already own an mbuf are
	 * simply re-initialized; empty slots get a fresh buffer.
	 */
	for (i = 0; i < STGE_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = stge_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				stge_rxdrain(sc);
				goto out;
			}
		} else
			STGE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	STGE_RXCHAIN_RESET(sc);

	/* Set the station address, one byte per register. */
	for (i = 0; i < 6; i++)
		CSR_WRITE_1(sc, STGE_StationAddress0 + i,
		    sc->sc_arpcom.ac_enaddr[i]);

	/*
	 * Set the statistics masks.  Disable all the RMON stats,
	 * and disable selected stats in the non-RMON stats registers.
	 */
	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
	CSR_WRITE_4(sc, STGE_StatisticsMask,
	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
	    (1U << 21));

	/* Set up the receive filter. */
	stge_set_filter(sc);

	/*
	 * Give the transmit and receive ring to the chip.
	 */
	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0); /* NOTE: 32-bit DMA */
	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
	    STGE_CDTXADDR(sc, sc->sc_txdirty));

	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0); /* NOTE: 32-bit DMA */
	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
	    STGE_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Initialize the Tx auto-poll period.  It's OK to make this number
	 * large (255 is the max, but we use 127) -- we explicitly kick the
	 * transmit engine when there's actually a packet.
	 */
	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);

	/* ..and the Rx auto-poll period. */
	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 64);

	/* Initialize the Tx start threshold. */
	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);

	/* RX DMA thresholds, from linux */
	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);

	/* Rx early threshold, from Linux */
	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);

	/* Tx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);

	/*
	 * Initialize the Rx DMA interrupt control register.  We
	 * request an interrupt after every incoming packet, but
	 * defer it for 32us (64 * 512 ns).  When the number of
	 * interrupts pending reaches 8, we stop deferring the
	 * interrupt, and signal it immediately.
	 */
	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
	    RDIC_RxFrameCount(8) | RDIC_RxDMAWaitTime(512));

	/*
	 * Initialize the interrupt mask.  Ack any stale status first.
	 */
	sc->sc_IntEnable = IS_HostError | IS_TxComplete | IS_UpdateStats |
	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
	CSR_WRITE_2(sc, STGE_IntStatus, 0xffff);
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/*
	 * Configure the DMA engine.
	 * XXX Should auto-tune TxBurstLimit.
	 */
	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl |
	    DMAC_TxBurstLimit(3));

	/*
	 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
	 * FIFO, and send an un-PAUSE frame when the FIFO is totally
	 * empty again.
	 */
	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
	CSR_WRITE_2(sc, STGE_FlowOffThresh, 0);

	/*
	 * Set the maximum frame size.
	 */
#ifdef STGE_JUMBO
	CSR_WRITE_2(sc, STGE_MaxFrameSize, STGE_JUMBO_FRAMELEN);
#else
	CSR_WRITE_2(sc, STGE_MaxFrameSize, ETHER_MAX_LEN);
#endif

	/*
	 * Initialize MacCtrl -- do it before setting the media,
	 * as setting the media will actually program the register.
	 *
	 * Note: We have to poke the IFS value before poking
	 * anything else.
	 */
	sc->sc_MACCtrl = MC_IFSSelect(0);
	CSR_WRITE_4(sc, STGE_MACCtrl, sc->sc_MACCtrl);
	sc->sc_MACCtrl |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;

	if (sc->sc_rev >= 6) {		/* >= B.2 */
		/* Multi-frag frame bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);

		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
		/* XXX ? from linux */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
	}

	/*
	 * Set the current media.
	 */
	mii_mediachg(&sc->sc_mii);

	/*
	 * Start the one second MII clock.
	 */
	timeout_add(&sc->sc_timeout, hz);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
1380:
1381: /*
1382: * stge_drain:
1383: *
1384: * Drain the receive queue.
1385: */
1386: void
1387: stge_rxdrain(struct stge_softc *sc)
1388: {
1389: struct stge_descsoft *ds;
1390: int i;
1391:
1392: for (i = 0; i < STGE_NRXDESC; i++) {
1393: ds = &sc->sc_rxsoft[i];
1394: if (ds->ds_mbuf != NULL) {
1395: bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1396: ds->ds_mbuf->m_next = NULL;
1397: m_freem(ds->ds_mbuf);
1398: ds->ds_mbuf = NULL;
1399: }
1400: }
1401: }
1402:
/*
 * stge_stop:		[ ifnet interface function ]
 *
 *	Stop transmission on the interface.
 *
 *	If `disable' is non-zero, the receive buffers are drained and
 *	freed as well.  Teardown order matters: interrupts off, MAC
 *	stopped, DMA quiesced, then descriptor pointers cleared.
 */
void
stge_stop(struct ifnet *ifp, int disable)
{
	struct stge_softc *sc = ifp->if_softc;
	struct stge_descsoft *ds;
	int i;

	/*
	 * Stop the one second clock.
	 */
	timeout_del(&sc->sc_timeout);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_2(sc, STGE_IntEnable, 0);

	/*
	 * Stop receiver, transmitter, and stats update.
	 */
	CSR_WRITE_4(sc, STGE_MACCtrl,
	    MC_StatisticsDisable | MC_TxDisable | MC_RxDisable);

	/*
	 * Stop the transmit and receive DMA, then clear the list
	 * pointers so the chip cannot fetch stale descriptors.
	 */
	stge_dma_wait(sc);
	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < STGE_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/* Optionally free the receive buffers as well. */
	if (disable)
		stge_rxdrain(sc);
}
1464:
1465: static int
1466: stge_eeprom_wait(struct stge_softc *sc)
1467: {
1468: int i;
1469:
1470: for (i = 0; i < STGE_TIMEOUT; i++) {
1471: delay(1000);
1472: if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
1473: return (0);
1474: }
1475: return (1);
1476: }
1477:
/*
 * stge_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 *
 *	Stores the 16-bit word at `offset' into *data.  Timeouts are
 *	reported on the console but not returned to the caller, so
 *	*data is unreliable after a reported failure.
 */
void
stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
{

	/* The EEPROM must be idle before a new opcode can be issued. */
	if (stge_eeprom_wait(sc))
		printf("%s: EEPROM failed to come ready\n",
		    sc->sc_dev.dv_xname);

	/* Issue a read (RR) opcode for the requested word offset. */
	CSR_WRITE_2(sc, STGE_EepromCtrl,
	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
	if (stge_eeprom_wait(sc))
		printf("%s: EEPROM read timed out\n",
		    sc->sc_dev.dv_xname);
	*data = CSR_READ_2(sc, STGE_EepromData);
}
1498:
/*
 * stge_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 *
 *	Returns 0 on success, or ENOBUFS if no mbuf header or cluster
 *	could be allocated.  A DMA map load failure panics (XXX).
 */
int
stge_add_rxbuf(struct stge_softc *sc, int idx)
{
	struct stge_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/*
	 * Offset the data pointer by 2 bytes so the IP header lands
	 * longword-aligned after the 14-byte Ethernet header.
	 */
	m->m_data = m->m_ext.ext_buf + 2;
	m->m_len = MCLBYTES - 2;

	/* Unload any map still attached to this descriptor slot. */
	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	/* Note: the whole cluster is mapped, including the 2-byte pad. */
	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("stge_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Hand the descriptor (back) to the chip. */
	STGE_INIT_RXDESC(sc, idx);

	return (0);
}
1544:
/*
 * stge_set_filter:
 *
 *	Set up the receive filter.
 *
 *	Programs ReceiveMode (unicast/broadcast/multicast/promiscuous)
 *	and, when not in ALLMULTI mode, the 64-bit multicast hash table.
 */
void
stge_set_filter(struct stge_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];

	sc->sc_ReceiveMode = RM_ReceiveUnicast;
	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_ReceiveMode |= RM_ReceiveBroadcast;

	/* XXX: ST1023 only works in promiscuous mode */
	if (sc->sc_stge1023)
		ifp->if_flags |= IFF_PROMISC;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_ReceiveMode |= RM_ReceiveAllFrames;
		goto allmulti;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table.  The
	 * high order bits select the register, while the rest of the bits
	 * select the bit within the register.
	 */

	memset(mchash, 0, sizeof(mchash));

	ETHER_FIRST_MULTI(step, ac, enm);
	if (enm == NULL)
		goto done;

	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 6 least significant bits. */
		crc &= 0x3f;

		/* Set the corresponding bit in the hash table. */
		mchash[crc >> 5] |= 1 << (crc & 0x1f);

		ETHER_NEXT_MULTI(step, enm);
	}

	sc->sc_ReceiveMode |= RM_ReceiveMulticastHash;

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto done;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_ReceiveMode |= RM_ReceiveMulticast;

 done:
	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		/*
		 * Program the multicast hash table.
		 */
		CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
		CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
	}

	/* Finally, commit the receive mode to the chip. */
	CSR_WRITE_2(sc, STGE_ReceiveMode, sc->sc_ReceiveMode);
}
1631:
1632: /*
1633: * stge_mii_readreg: [mii interface function]
1634: *
1635: * Read a PHY register on the MII of the TC9021.
1636: */
1637: int
1638: stge_mii_readreg(struct device *self, int phy, int reg)
1639: {
1640:
1641: return (mii_bitbang_readreg(self, &stge_mii_bitbang_ops, phy, reg));
1642: }
1643:
1644: /*
1645: * stge_mii_writereg: [mii interface function]
1646: *
1647: * Write a PHY register on the MII of the TC9021.
1648: */
1649: void
1650: stge_mii_writereg(struct device *self, int phy, int reg, int val)
1651: {
1652:
1653: mii_bitbang_writereg(self, &stge_mii_bitbang_ops, phy, reg, val);
1654: }
1655:
1656: /*
1657: * stge_mii_statchg: [mii interface function]
1658: *
1659: * Callback from MII layer when media changes.
1660: */
1661: void
1662: stge_mii_statchg(struct device *self)
1663: {
1664: struct stge_softc *sc = (struct stge_softc *) self;
1665:
1666: if (sc->sc_mii.mii_media_active & IFM_FDX)
1667: sc->sc_MACCtrl |= MC_DuplexSelect;
1668: else
1669: sc->sc_MACCtrl &= ~MC_DuplexSelect;
1670:
1671: /* XXX 802.1x flow-control? */
1672:
1673: CSR_WRITE_4(sc, STGE_MACCtrl, sc->sc_MACCtrl);
1674: }
1675:
1676: /*
1677: * sste_mii_bitbang_read: [mii bit-bang interface function]
1678: *
1679: * Read the MII serial port for the MII bit-bang module.
1680: */
1681: uint32_t
1682: stge_mii_bitbang_read(struct device *self)
1683: {
1684: struct stge_softc *sc = (void *) self;
1685:
1686: return (CSR_READ_1(sc, STGE_PhyCtrl));
1687: }
1688:
1689: /*
1690: * stge_mii_bitbang_write: [mii big-bang interface function]
1691: *
1692: * Write the MII serial port for the MII bit-bang module.
1693: */
1694: void
1695: stge_mii_bitbang_write(struct device *self, uint32_t val)
1696: {
1697: struct stge_softc *sc = (void *) self;
1698:
1699: CSR_WRITE_1(sc, STGE_PhyCtrl, val | sc->sc_PhyCtrl);
1700: }
1701:
1702: /*
1703: * stge_mediastatus: [ifmedia interface function]
1704: *
1705: * Get the current interface media status.
1706: */
1707: void
1708: stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1709: {
1710: struct stge_softc *sc = ifp->if_softc;
1711:
1712: mii_pollstat(&sc->sc_mii);
1713: ifmr->ifm_status = sc->sc_mii.mii_media_status;
1714: ifmr->ifm_active = sc->sc_mii.mii_media_active;
1715: }
1716:
1717: /*
1718: * stge_mediachange: [ifmedia interface function]
1719: *
1720: * Set hardware to newly-selected media.
1721: */
1722: int
1723: stge_mediachange(struct ifnet *ifp)
1724: {
1725: struct stge_softc *sc = ifp->if_softc;
1726:
1727: if (ifp->if_flags & IFF_UP)
1728: mii_mediachg(&sc->sc_mii);
1729: return (0);
1730: }