/*	$OpenBSD: if_nfe.c,v 1.69 2007/03/02 00:16:59 jsg Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, void *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(caddr_t, u_int, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

struct cfattach nfe_ca = {
	sizeof (struct nfe_softc), nfe_match, nfe_attach
};

struct cfdriver nfe_cd = {
	NULL, "nfe", DV_IFNET
};

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif
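
/*
 * Usage sketch: with NFE_DEBUG defined, the traces in this file are
 * gated on the runtime nfedebug level, e.g.
 *
 *	DPRINTF(("%s: link state changed\n", ...));	if nfedebug > 0
 *	DPRINTFN(5, ("nfe_intr: ...\n"));		if nfedebug >= 5
 *
 * so a kernel built with NFE_DEBUG stays quiet until nfedebug is
 * raised (e.g. from ddb).
 */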

const struct pci_matchid nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 }
};

int
nfe_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
}
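
/*
 * Note: the sizeof quotient above is the usual array-element-count idiom
 * (what newer code spells nitems(nfe_devices)).
 */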

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &memsize, 0) == 0)
			break;
		/* FALLTHROUGH */
	default:
		printf(": could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": could not map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN;
		break;
	}

	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP) {
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}

void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}
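
/*
 * Both MII accessors above poll NFE_PHY_CTL until NFE_PHY_BUSY clears,
 * waiting up to 1000 * 100us = 100ms.  A minimal sketch of a shared
 * helper factoring out that wait (nfe_phy_wait() is hypothetical, not
 * part of this driver):
 */
#if 0
static int
nfe_phy_wait(struct nfe_softc *sc)
{
	int ntries;

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			return 0;	/* PHY released the bus */
	}
	return ETIMEDOUT;
}
#endif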

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return error;
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			nfe_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ENOTTY;
	}

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}
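
/*
 * Worked example of the wrap-around case above: assuming a 256-entry
 * ring, a call with start = 250 and end = 10 issues two syncs, one
 * covering descriptors [250, 256) and one covering [0, 10), i.e. the
 * producer range that wrapped past the end of the ring.
 */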

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len;

	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
			len = letoh16(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
			len = letoh16(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) &&
		    (flags & NFE_RX_IP_CSUMOK)) {
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ifp->if_ipackets++;
		ether_input_mbuf(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}
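
/*
 * Receive protocol summary: a descriptor whose NFE_RX_READY bit is still
 * set is owned by the chip, so the scan above stops there.  For completed
 * descriptors the driver swaps in a freshly loaded mbuf before passing
 * the old one up the stack, so a ring slot is never left empty; writing
 * NFE_RX_READY back hands the slot to the chip again.
 */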

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = letoh16(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error 0x%04b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error 0x%04b\n",
				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}
		if (data->m == NULL) {	/* should not get here */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}

int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = 0;
#if NVLAN > 0
	uint32_t vtag = 0;
#endif
	int error, i, first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if ((m0->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
	    m0->m_pkthdr.rcvif != NULL) {
		struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;
		vtag = NFE_TX_VTAG | htons(ifv->ifv_tag);
	}
#endif
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_UDP_CSUM;

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
#if NVLAN > 0
			desc64->vtag = htole32(vtag);
#endif
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		if (map->dm_nsegs > 1) {
			/*
			 * Checksum flags and vtag belong to the first fragment
			 * only.
			 */
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
#if NVLAN > 0
			vtag = 0;
#endif
			/*
			 * Setting of the valid bit in the first descriptor is
			 * deferred until the whole chain is fully setup.
			 */
			flags |= NFE_TX_VALID;
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
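
/*
 * Two details of the descriptor format used above are easy to miss: the
 * hardware length field is encoded as (segment length - 1), and
 * NFE_TX_VALID is set on the first descriptor only after the rest of the
 * chain is written, so the chip cannot start DMA on a half-built chain.
 * Sketch of the resulting layout for a two-segment mbuf chain:
 *
 *	desc[first]	len0-1	csum/vtag flags		VALID (set last)
 *	desc[first+1]	len1-1	LASTFRAG | VALID
 */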

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
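
/*
 * The IFQ_POLL/IFQ_DEQUEUE pair above is the usual "peek then commit"
 * pattern: a packet is only dequeued once nfe_encap() has accepted it,
 * so when the Tx ring fills up the packet stays on the send queue and is
 * retried after nfe_txeof() clears IFF_OACTIVE.
 */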

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	nfe_init(ifp);

	ifp->if_oerrors++;
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#if NVLAN > 0
	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
#endif
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
#endif

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}

struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(caddr_t buf, u_int size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
}
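
/*
 * Example of the index recovery above: the jumbo pool is one contiguous
 * allocation carved into NFE_JBYTES-sized chunks, so a buffer starting at
 * jpool + 3 * NFE_JBYTES yields i = 3 and &sc->rxq.jbuf[3] is the
 * bookkeeping entry to put back on the free list.
 */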

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

void
nfe_setmulti(struct nfe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}
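
/*
 * Worked example of the filter computation above: for the two groups
 * 01:00:5e:00:00:01 and 01:00:5e:00:00:02, addr ends up as their AND
 * (01:00:5e:00:00:00) and mask as the bit positions on which all groups
 * agree (ff:ff:ff:ff:ff:fc).  The chip then accepts any destination that
 * matches addr under mask -- here 01:00:5e:00:00:00 through :03, a
 * slightly wider set than requested but never a narrower one.
 */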

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >> 8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}
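
/*
 * Note the asymmetry between the two routines above: at power-up the chip
 * presents the station address in reversed byte order, which
 * nfe_get_macaddr() decodes, while nfe_set_macaddr() programs it back in
 * the layout the running MAC expects.  For 00:11:22:33:44:55 it writes
 * NFE_MACADDR_LO = 0x00005544 and NFE_MACADDR_HI = 0x33221100.
 */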

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add(&sc->sc_tick_ch, hz);
}