Annotation of sys/dev/ic/xl.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: xl.c,v 1.78 2007/05/19 16:51:57 kettenis Exp $ */
2:
3: /*
4: * Copyright (c) 1997, 1998, 1999
5: * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
6: *
7: * Redistribution and use in source and binary forms, with or without
8: * modification, are permitted provided that the following conditions
9: * are met:
10: * 1. Redistributions of source code must retain the above copyright
11: * notice, this list of conditions and the following disclaimer.
12: * 2. Redistributions in binary form must reproduce the above copyright
13: * notice, this list of conditions and the following disclaimer in the
14: * documentation and/or other materials provided with the distribution.
15: * 3. All advertising materials mentioning features or use of this software
16: * must display the following acknowledgement:
17: * This product includes software developed by Bill Paul.
18: * 4. Neither the name of the author nor the names of any co-contributors
19: * may be used to endorse or promote products derived from this software
20: * without specific prior written permission.
21: *
22: * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25: * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32: * THE POSSIBILITY OF SUCH DAMAGE.
33: *
34: * $FreeBSD: if_xl.c,v 1.77 2000/08/28 20:40:03 wpaul Exp $
35: */
36:
37: /*
38: * 3Com 3c90x Etherlink XL PCI NIC driver
39: *
40: * Supports the 3Com "boomerang", "cyclone", and "hurricane" PCI
41: * bus-master chips (3c90x cards and embedded controllers) including
42: * the following:
43: *
44: * 3Com 3c900-TPO 10Mbps/RJ-45
45: * 3Com 3c900-COMBO 10Mbps/RJ-45,AUI,BNC
46: * 3Com 3c905-TX 10/100Mbps/RJ-45
47: * 3Com 3c905-T4 10/100Mbps/RJ-45
48: * 3Com 3c900B-TPO 10Mbps/RJ-45
49: * 3Com 3c900B-COMBO 10Mbps/RJ-45,AUI,BNC
50: * 3Com 3c900B-TPC 10Mbps/RJ-45,BNC
51: * 3Com 3c900B-FL 10Mbps/Fiber-optic
52: * 3Com 3c905B-COMBO 10/100Mbps/RJ-45,AUI,BNC
53: * 3Com 3c905B-TX 10/100Mbps/RJ-45
54: * 3Com 3c905B-FL/FX 10/100Mbps/Fiber-optic
55: * 3Com 3c905C-TX 10/100Mbps/RJ-45 (Tornado ASIC)
56: * 3Com 3c980-TX 10/100Mbps server adapter (Hurricane ASIC)
57: * 3Com 3c980C-TX 10/100Mbps server adapter (Tornado ASIC)
58: * 3Com 3cSOHO100-TX 10/100Mbps/RJ-45 (Hurricane ASIC)
59: * 3Com 3c450-TX 10/100Mbps/RJ-45 (Tornado ASIC)
60: * 3Com 3c555 10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
61: * 3Com 3c556 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
62: * 3Com 3c556B 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
63: * 3Com 3c575TX 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
64: * 3Com 3c575B 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
65: * 3Com 3c575C 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
66: * 3Com 3cxfem656 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
67: * 3Com 3cxfem656b 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
68: * 3Com 3cxfem656c 10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
69: * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
70: * Dell on-board 3c920 10/100Mbps/RJ-45
71: * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
72: * Dell Latitude laptop docking station embedded 3c905-TX
73: *
74: * Written by Bill Paul <wpaul@ctr.columbia.edu>
75: * Electrical Engineering Department
76: * Columbia University, New York City
77: */
78:
79: /*
80: * The 3c90x series chips use a bus-master DMA interface for transferring
81: * packets to and from the controller chip. Some of the "vortex" cards
82: * (3c59x) also supported a bus master mode, however for those chips
83: * you could only DMA packets to/from a contiguous memory buffer. For
84: * transmission this would mean copying the contents of the queued mbuf
85: * chain into an mbuf cluster and then DMAing the cluster. This extra
86: * copy would sort of defeat the purpose of the bus master support for
87: * any packet that doesn't fit into a single mbuf.
88: *
89: * By contrast, the 3c90x cards support a fragment-based bus master
90: * mode where mbuf chains can be encapsulated using TX descriptors.
91: * This is similar to other PCI chips such as the Texas Instruments
92: * ThunderLAN and the Intel 82557/82558.
93: *
94: * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
95: * bus master chips because they maintain the old PIO interface for
96: * backwards compatibility, but starting with the 3c905B and the
97: * "cyclone" chips, the compatibility interface has been dropped.
98: * Since using bus master DMA is a big win, we use this driver to
99: * support the PCI "boomerang" chips even though they work with the
100: * "vortex" driver in order to obtain better performance.
101: */
102:
103: #include "bpfilter.h"
104:
105: #include <sys/param.h>
106: #include <sys/systm.h>
107: #include <sys/mbuf.h>
108: #include <sys/protosw.h>
109: #include <sys/socket.h>
110: #include <sys/ioctl.h>
111: #include <sys/errno.h>
112: #include <sys/malloc.h>
113: #include <sys/kernel.h>
114: #include <sys/proc.h> /* only for declaration of wakeup() used by vm.h */
115: #include <sys/device.h>
116:
117: #include <net/if.h>
118: #include <net/if_dl.h>
119: #include <net/if_types.h>
120: #include <net/if_media.h>
121:
122: #ifdef INET
123: #include <netinet/in.h>
124: #include <netinet/in_systm.h>
125: #include <netinet/in_var.h>
126: #include <netinet/ip.h>
127: #include <netinet/if_ether.h>
128: #endif
129:
130: #include <dev/mii/mii.h>
131: #include <dev/mii/miivar.h>
132:
133: #include <machine/bus.h>
134:
135: #if NBPFILTER > 0
136: #include <net/bpf.h>
137: #endif
138:
139: #include <dev/ic/xlreg.h>
140:
141: /*
142: * TX Checksumming is disabled by default for two reasons:
143: * - TX Checksumming will occasionally produce corrupt packets
144: * - TX Checksumming seems to reduce performance
145: *
146: * Only 905B/C cards were reported to have this problem, it is possible
147: * that later chips _may_ be immune.
148: */
149: #define XL905B_TXCSUM_BROKEN 1
150:
151: int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *);
152: void xl_stats_update(void *);
153: int xl_encap(struct xl_softc *, struct xl_chain *,
154: struct mbuf * );
155: void xl_rxeof(struct xl_softc *);
156: int xl_rx_resync(struct xl_softc *);
157: void xl_txeof(struct xl_softc *);
158: void xl_txeof_90xB(struct xl_softc *);
159: void xl_txeoc(struct xl_softc *);
160: int xl_intr(void *);
161: void xl_start(struct ifnet *);
162: void xl_start_90xB(struct ifnet *);
163: int xl_ioctl(struct ifnet *, u_long, caddr_t);
164: void xl_init(void *);
165: void xl_stop(struct xl_softc *);
166: void xl_freetxrx(struct xl_softc *);
167: void xl_watchdog(struct ifnet *);
168: void xl_shutdown(void *);
169: int xl_ifmedia_upd(struct ifnet *);
170: void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
171:
172: int xl_eeprom_wait(struct xl_softc *);
173: int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int);
174: void xl_mii_sync(struct xl_softc *);
175: void xl_mii_send(struct xl_softc *, u_int32_t, int);
176: int xl_mii_readreg(struct xl_softc *, struct xl_mii_frame *);
177: int xl_mii_writereg(struct xl_softc *, struct xl_mii_frame *);
178:
179: void xl_setcfg(struct xl_softc *);
180: void xl_setmode(struct xl_softc *, int);
181: void xl_setmulti(struct xl_softc *);
182: void xl_setmulti_hash(struct xl_softc *);
183: void xl_setpromisc(struct xl_softc *);
184: void xl_reset(struct xl_softc *);
185: int xl_list_rx_init(struct xl_softc *);
186: int xl_list_tx_init(struct xl_softc *);
187: int xl_list_tx_init_90xB(struct xl_softc *);
188: void xl_wait(struct xl_softc *);
189: void xl_mediacheck(struct xl_softc *);
190: void xl_choose_xcvr(struct xl_softc *, int);
191: #ifdef notdef
192: void xl_testpacket(struct xl_softc *);
193: #endif
194:
195: int xl_miibus_readreg(struct device *, int, int);
196: void xl_miibus_writereg(struct device *, int, int, int);
197: void xl_miibus_statchg(struct device *);
198:
199: void xl_power(int, void *);
200:
201: void
202: xl_power(int why, void *arg)
203: {
204: struct xl_softc *sc = arg;
205: struct ifnet *ifp;
206: int s;
207:
208: s = splnet();
209: if (why != PWR_RESUME)
210: xl_stop(sc);
211: else {
212: ifp = &sc->sc_arpcom.ac_if;
213: if (ifp->if_flags & IFF_UP) {
214: xl_reset(sc);
215: xl_init(sc);
216: }
217: }
218: splx(s);
219: }
220:
221: /*
222: * Murphy's law says that it's possible the chip can wedge and
223: * the 'command in progress' bit may never clear. Hence, we wait
224: * only a finite amount of time to avoid getting caught in an
225: * infinite loop. Normally this delay routine would be a macro,
226: * but it isn't called during normal operation so we can afford
227: * to make it a function.
228: */
229: void
230: xl_wait(struct xl_softc *sc)
231: {
232: int i;
233:
234: for (i = 0; i < XL_TIMEOUT; i++) {
235: if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
236: break;
237: }
238:
239: if (i == XL_TIMEOUT)
240: printf("%s: command never completed!\n", sc->sc_dev.dv_xname);
241: }
242:
243: /*
244: * MII access routines are provided for adapters with external
245: * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
246: * autoneg logic that's faked up to look like a PHY (3c905B-TX).
247: * Note: if you don't perform the MDIO operations just right,
248: * it's possible to end up with code that works correctly with
249: * some chips/CPUs/processor speeds/bus speeds/etc but not
250: * with others.
251: */
252: #define MII_SET(x) \
253: CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \
254: CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))
255:
256: #define MII_CLR(x) \
257: CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \
258: CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
259:
260: /*
261: * Sync the PHYs by setting data bit and strobing the clock 32 times.
262: */
/*
 * Synchronize the bit-banged MII bus: with the host driving the data
 * pin high, strobe the clock 32 times.  All MII management pins live
 * in register window 4.
 */
void
xl_mii_sync(struct xl_softc *sc)
{
	int i;

	XL_SEL_WIN(4);
	/* Host drives the bus (DIR) with data held high. */
	MII_SET(XL_MII_DIR|XL_MII_DATA);

	for (i = 0; i < 32; i++) {
		MII_SET(XL_MII_CLK);
		/*
		 * NOTE(review): the repeated writes of an already-set
		 * DATA bit look redundant; presumably they act as a
		 * settling delay between clock edges — confirm against
		 * 3Com timing requirements before removing.
		 */
		MII_SET(XL_MII_DATA);
		MII_SET(XL_MII_DATA);
		MII_CLR(XL_MII_CLK);
		MII_SET(XL_MII_DATA);
		MII_SET(XL_MII_DATA);
	}
}
280:
281: /*
282: * Clock a series of bits through the MII.
283: */
/*
 * Clock a series of bits through the MII, most-significant bit first.
 * 'cnt' is the number of bits to shift out of 'bits'.
 */
void
xl_mii_send(struct xl_softc *sc, u_int32_t bits, int cnt)
{
	int i;

	/* MII management pins are in register window 4. */
	XL_SEL_WIN(4);
	MII_CLR(XL_MII_CLK);

	/* Walk a one-bit mask from the MSB position down to bit 0. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(XL_MII_DATA);
		} else {
			MII_CLR(XL_MII_DATA);
		}
		/* Pulse the clock low-then-high to latch the data bit. */
		MII_CLR(XL_MII_CLK);
		MII_SET(XL_MII_CLK);
	}
}
302:
303: /*
304: * Read a PHY register through the MII.
305: */
/*
 * Read a PHY register through the bit-banged MII interface.
 * The caller fills in frame->mii_phyaddr and frame->mii_regaddr;
 * on success the register value is returned in frame->mii_data.
 * Returns 0 on success, 1 if the PHY failed to acknowledge.
 */
int
xl_mii_readreg(struct xl_softc *sc, struct xl_mii_frame *frame)
{
	int i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX: read opcode, no turnaround driven by us,
	 * data field cleared so the result can be ORed in bit by bit.
	 */
	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Select register window 4 (PHY management).
	 */

	XL_SEL_WIN(4);

	CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
	/*
	 * Turn on data xmit (host drives the data pin).
	 */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/*
	 * Send command/address info: start delimiter, opcode,
	 * PHY address, register address.
	 */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((XL_MII_CLK|XL_MII_DATA));
	MII_SET(XL_MII_CLK);

	/* Turn off xmit: release the data pin so the PHY can drive it. */
	MII_CLR(XL_MII_DIR);

	/* Check for ack: PHY pulls data low during the turnaround. */
	MII_CLR(XL_MII_CLK);
	ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
	MII_SET(XL_MII_CLK);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(XL_MII_CLK);
			MII_SET(XL_MII_CLK);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(XL_MII_CLK);
		if (!ack) {
			if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
				frame->mii_data |= i;
		}
		MII_SET(XL_MII_CLK);
	}

fail:

	/* One final clock pulse to terminate the frame. */
	MII_CLR(XL_MII_CLK);
	MII_SET(XL_MII_CLK);

	splx(s);

	if (ack)
		return (1);
	return (0);
}
387:
388: /*
389: * Write to a PHY register through the MII.
390: */
/*
 * Write to a PHY register through the bit-banged MII interface.
 * The caller fills in frame->mii_phyaddr, frame->mii_regaddr and
 * frame->mii_data.  Always returns 0 (writes are not acknowledged).
 */
int
xl_mii_writereg(struct xl_softc *sc, struct xl_mii_frame *frame)
{
	int s;

	s = splnet();

	/*
	 * Set up frame for TX: write opcode with a driven turnaround.
	 */

	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_WRITEOP;
	frame->mii_turnaround = XL_MII_TURNAROUND;

	/*
	 * Select register window 4 (PHY management).
	 */
	XL_SEL_WIN(4);

	/*
	 * Turn on data output (host drives the data pin).
	 */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/* Shift out the whole management frame. */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);
	xl_mii_send(sc, frame->mii_turnaround, 2);
	xl_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(XL_MII_CLK);
	MII_CLR(XL_MII_CLK);

	/*
	 * Turn off xmit: release the data pin.
	 */
	MII_CLR(XL_MII_DIR);

	splx(s);

	return (0);
}
438:
439: int
440: xl_miibus_readreg(struct device *self, int phy, int reg)
441: {
442: struct xl_softc *sc = (struct xl_softc *)self;
443: struct xl_mii_frame frame;
444:
445: if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
446: return (0);
447:
448: bzero((char *)&frame, sizeof(frame));
449:
450: frame.mii_phyaddr = phy;
451: frame.mii_regaddr = reg;
452: xl_mii_readreg(sc, &frame);
453:
454: return (frame.mii_data);
455: }
456:
457: void
458: xl_miibus_writereg(struct device *self, int phy, int reg, int data)
459: {
460: struct xl_softc *sc = (struct xl_softc *)self;
461: struct xl_mii_frame frame;
462:
463: if (!(sc->xl_flags & XL_FLAG_PHYOK) && phy != 24)
464: return;
465:
466: bzero((char *)&frame, sizeof(frame));
467:
468: frame.mii_phyaddr = phy;
469: frame.mii_regaddr = reg;
470: frame.mii_data = data;
471:
472: xl_mii_writereg(sc, &frame);
473: }
474:
475: void
476: xl_miibus_statchg(struct device *self)
477: {
478: struct xl_softc *sc = (struct xl_softc *)self;
479:
480: xl_setcfg(sc);
481:
482: /* Set ASIC's duplex mode to match the PHY. */
483: XL_SEL_WIN(3);
484: if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
485: CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
486: else
487: CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
488: (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
489: }
490:
491: /*
492: * The EEPROM is slow: give it time to come ready after issuing
493: * it a command.
494: */
495: int
496: xl_eeprom_wait(struct xl_softc *sc)
497: {
498: int i;
499:
500: for (i = 0; i < 100; i++) {
501: if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
502: DELAY(162);
503: else
504: break;
505: }
506:
507: if (i == 100) {
508: printf("%s: eeprom failed to come ready\n", sc->sc_dev.dv_xname);
509: return (1);
510: }
511:
512: return (0);
513: }
514:
515: /*
516: * Read a sequence of words from the EEPROM. Note that ethernet address
517: * data is stored in the EEPROM in network byte order.
518: */
/*
 * Read a sequence of words from the EEPROM. Note that ethernet address
 * data is stored in the EEPROM in network byte order.
 *
 * dest: buffer receiving 'cnt' 16-bit words
 * off:  starting word offset within the EEPROM
 * swap: if nonzero, byte-swap each word from network order
 * Returns 0 on success, 1 if the EEPROM never came ready.
 */
int
xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int err = 0, i;
	u_int16_t word = 0, *ptr;
	/* Encode a word offset into the command register's address field. */
	#define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
	#define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
	/* WARNING! DANGER!
	 * It's easy to accidentally overwrite the rom content!
	 * Note: the 3c575 uses 8bit EEPROM offsets.
	 */
	/* EEPROM command/data registers are in window 0. */
	XL_SEL_WIN(0);

	if (xl_eeprom_wait(sc))
		return (1);

	/* Some chips keep their interesting data at a 0x30 word offset. */
	if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
		off += 0x30;

	for (i = 0; i < cnt; i++) {
		/* Issue a read command with the offset encoding this chip uses. */
		if (sc->xl_flags & XL_FLAG_8BITROM)
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
		else
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
		err = xl_eeprom_wait(sc);
		if (err)
			break;
		word = CSR_READ_2(sc, XL_W0_EE_DATA);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return (err ? 1 : 0);
}
558:
559: /*
560: * NICs older than the 3c905B have only one multicast option, which
561: * is to enable reception of all multicast frames.
562: */
563: void
564: xl_setmulti(struct xl_softc *sc)
565: {
566: struct ifnet *ifp;
567: struct arpcom *ac = &sc->sc_arpcom;
568: u_int8_t rxfilt;
569:
570: ifp = &sc->sc_arpcom.ac_if;
571:
572: XL_SEL_WIN(5);
573: rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
574:
575: if (ifp->if_flags & IFF_ALLMULTI) {
576: rxfilt |= XL_RXFILTER_ALLMULTI;
577: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
578: return;
579: }
580:
581: if (ac->ac_multicnt > 0)
582: rxfilt |= XL_RXFILTER_ALLMULTI;
583: else
584: rxfilt &= ~XL_RXFILTER_ALLMULTI;
585:
586: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
587: }
588:
589: /*
590: * 3c905B adapters have a hash filter that we can program.
591: */
/*
 * 3c905B adapters have a hash filter that we can program.
 * Program one hash bit per joined multicast group; fall back to
 * receiving all multicast frames if ALLMULTI is requested or a
 * multicast address *range* is joined (the hash can't express those).
 */
void
xl_setmulti_hash(struct xl_softc *sc)
{
	struct ifnet *ifp;
	int h = 0, i;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t rxfilt;
	int mcnt = 0;

	ifp = &sc->sc_arpcom.ac_if;

	/* RX filter register is in window 5. */
	XL_SEL_WIN(5);
	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);

	if (ifp->if_flags & IFF_ALLMULTI) {
allmulti:
	rxfilt |= XL_RXFILTER_ALLMULTI;
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
	return;
	} else
		rxfilt &= ~XL_RXFILTER_ALLMULTI;


	/* first, zot all the existing hash bits */
	for (i = 0; i < XL_HASHFILT_SIZE; i++)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		/* An addrlo != addrhi entry is a range: not hashable. */
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			goto allmulti;
		}
		/* Hash is the low 8 bits of the big-endian CRC32. */
		h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) &
		    0x000000FF;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|XL_HASH_SET|h);
		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Enable hash filtering only if at least one bit was set. */
	if (mcnt)
		rxfilt |= XL_RXFILTER_MULTIHASH;
	else
		rxfilt &= ~XL_RXFILTER_MULTIHASH;

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
}
642:
643: void
644: xl_setpromisc(struct xl_softc *sc)
645: {
646: struct ifnet *ifp;
647: u_int8_t rxfilt;
648:
649: ifp = &sc->sc_arpcom.ac_if;
650:
651: XL_SEL_WIN(5);
652: rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
653:
654: if (ifp->if_flags & IFF_PROMISC)
655: rxfilt |= XL_RXFILTER_ALLFRAMES;
656: else
657: rxfilt &= ~XL_RXFILTER_ALLFRAMES;
658:
659: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
660: }
661:
662:
663: #ifdef notdef
/*
 * Debug-only helper (compiled out under "#ifdef notdef"): build a
 * minimal loopback-style test frame addressed to ourselves and queue
 * it for transmission.
 */
void
xl_testpacket(struct xl_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	int error;

	ifp = &sc->sc_arpcom.ac_if;

	MGETHDR(m, M_DONTWAIT, MT_DATA);

	if (m == NULL)
		return;

	/* Source and destination are both our own station address. */
	bcopy(&sc->sc_arpcom.ac_enaddr,
	    mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN);
	bcopy(&sc->sc_arpcom.ac_enaddr,
	    mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN);
	mtod(m, struct ether_header *)->ether_type = htons(3);
	/* Three bytes of payload. */
	mtod(m, unsigned char *)[14] = 0;
	mtod(m, unsigned char *)[15] = 0;
	mtod(m, unsigned char *)[16] = 0xE3;
	m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3;
	IFQ_ENQUEUE(&ifp->if_snd, m, NULL, error);
	xl_start(ifp);
}
690: #endif
691:
/*
 * Program the connector field of the internal config register
 * (window 3) to match the media options: MII/BT4 media use the MII
 * connector, BTX media use the autoselect connector.  Also stops
 * the coax (10base2) transceiver.
 */
void
xl_setcfg(struct xl_softc *sc)
{
	u_int32_t icfg;

	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
	icfg &= ~XL_ICFG_CONNECTOR_MASK;
	if (sc->xl_media & XL_MEDIAOPT_MII ||
	    sc->xl_media & XL_MEDIAOPT_BT4)
		icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
	if (sc->xl_media & XL_MEDIAOPT_BTX)
		icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);

	CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
}
709:
710: void
711: xl_setmode(struct xl_softc *sc, int media)
712: {
713: struct ifnet *ifp = &sc->sc_arpcom.ac_if;
714: u_int32_t icfg;
715: u_int16_t mediastat;
716:
717: XL_SEL_WIN(4);
718: mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
719: XL_SEL_WIN(3);
720: icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
721:
722: if (sc->xl_media & XL_MEDIAOPT_BT) {
723: if (IFM_SUBTYPE(media) == IFM_10_T) {
724: ifp->if_baudrate = IF_Mbps(10);
725: sc->xl_xcvr = XL_XCVR_10BT;
726: icfg &= ~XL_ICFG_CONNECTOR_MASK;
727: icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
728: mediastat |= XL_MEDIASTAT_LINKBEAT|
729: XL_MEDIASTAT_JABGUARD;
730: mediastat &= ~XL_MEDIASTAT_SQEENB;
731: }
732: }
733:
734: if (sc->xl_media & XL_MEDIAOPT_BFX) {
735: if (IFM_SUBTYPE(media) == IFM_100_FX) {
736: ifp->if_baudrate = IF_Mbps(100);
737: sc->xl_xcvr = XL_XCVR_100BFX;
738: icfg &= ~XL_ICFG_CONNECTOR_MASK;
739: icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
740: mediastat |= XL_MEDIASTAT_LINKBEAT;
741: mediastat &= ~XL_MEDIASTAT_SQEENB;
742: }
743: }
744:
745: if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
746: if (IFM_SUBTYPE(media) == IFM_10_5) {
747: ifp->if_baudrate = IF_Mbps(10);
748: sc->xl_xcvr = XL_XCVR_AUI;
749: icfg &= ~XL_ICFG_CONNECTOR_MASK;
750: icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
751: mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
752: XL_MEDIASTAT_JABGUARD);
753: mediastat |= ~XL_MEDIASTAT_SQEENB;
754: }
755: if (IFM_SUBTYPE(media) == IFM_10_FL) {
756: ifp->if_baudrate = IF_Mbps(10);
757: sc->xl_xcvr = XL_XCVR_AUI;
758: icfg &= ~XL_ICFG_CONNECTOR_MASK;
759: icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
760: mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
761: XL_MEDIASTAT_JABGUARD);
762: mediastat |= ~XL_MEDIASTAT_SQEENB;
763: }
764: }
765:
766: if (sc->xl_media & XL_MEDIAOPT_BNC) {
767: if (IFM_SUBTYPE(media) == IFM_10_2) {
768: ifp->if_baudrate = IF_Mbps(10);
769: sc->xl_xcvr = XL_XCVR_COAX;
770: icfg &= ~XL_ICFG_CONNECTOR_MASK;
771: icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
772: mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
773: XL_MEDIASTAT_JABGUARD|
774: XL_MEDIASTAT_SQEENB);
775: }
776: }
777:
778: if ((media & IFM_GMASK) == IFM_FDX ||
779: IFM_SUBTYPE(media) == IFM_100_FX) {
780: XL_SEL_WIN(3);
781: CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
782: } else {
783: XL_SEL_WIN(3);
784: CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
785: (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
786: }
787:
788: if (IFM_SUBTYPE(media) == IFM_10_2)
789: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
790: else
791: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
792: CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
793: XL_SEL_WIN(4);
794: CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
795: DELAY(800);
796: XL_SEL_WIN(7);
797: }
798:
/*
 * Perform a full chip reset: issue the global reset command, wait for
 * the chip to come back, then reset the RX and TX engines and restore
 * the LED/MII power-inversion options on boards that need them.
 */
void
xl_reset(struct xl_softc *sc)
{
	int i;

	XL_SEL_WIN(0);
	/* Some boards need full-duplex advertisement disabled at reset. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
	    ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
	     XL_RESETOPT_DISADVFD:0));

	/*
	 * Pause briefly after issuing the reset command before trying
	 * to access any other registers. With my 3c575C cardbus card,
	 * failing to do this results in the system locking up while
	 * trying to poll the command busy bit in the status register.
	 */
	DELAY(100000);

	for (i = 0; i < XL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
			break;
	}

	if (i == XL_TIMEOUT)
		printf("%s: reset didn't complete\n", sc->sc_dev.dv_xname);

	/* Note: the RX reset takes an absurd amount of time
	 * on newer versions of the Tornado chips such as those
	 * on the 3c905CX and newer 3c908C cards. We wait an
	 * extra amount of time so that xl_wait() doesn't complain
	 * and annoy the users.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	DELAY(100000);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);

	/* Reapply inverted LED/MII power options lost by the reset. */
	if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
	    sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
		XL_SEL_WIN(2);
		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc,
		    XL_W2_RESET_OPTIONS)
		    | ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR)?XL_RESETOPT_INVERT_LED:0)
		    | ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR)?XL_RESETOPT_INVERT_MII:0)
		    );
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(100000);
}
851:
852: /*
853: * This routine is a kludge to work around possible hardware faults
854: * or manufacturing defects that can cause the media options register
855: * (or reset options register, as it's called for the first generation
856: * 3c90x adapters) to return an incorrect result. I have encountered
857: * one Dell Latitude laptop docking station with an integrated 3c905-TX
858: * which doesn't have any of the 'mediaopt' bits set. This screws up
859: * the attach routine pretty badly because it doesn't know what media
860: * to look for. If we find ourselves in this predicament, this routine
861: * will try to guess the media options values and warn the user of a
862: * possible manufacturing defect with his adapter/system/whatever.
863: */
864: void
865: xl_mediacheck(struct xl_softc *sc)
866: {
867: /*
868: * If some of the media options bits are set, assume they are
869: * correct. If not, try to figure it out down below.
870: * XXX I should check for 10baseFL, but I don't have an adapter
871: * to test with.
872: */
873: if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
874: /*
875: * Check the XCVR value. If it's not in the normal range
876: * of values, we need to fake it up here.
877: */
878: if (sc->xl_xcvr <= XL_XCVR_AUTO)
879: return;
880: else {
881: printf("%s: bogus xcvr value "
882: "in EEPROM (%x)\n", sc->sc_dev.dv_xname, sc->xl_xcvr);
883: printf("%s: choosing new default based "
884: "on card type\n", sc->sc_dev.dv_xname);
885: }
886: } else {
887: if (sc->xl_type == XL_TYPE_905B &&
888: sc->xl_media & XL_MEDIAOPT_10FL)
889: return;
890: printf("%s: WARNING: no media options bits set in "
891: "the media options register!!\n", sc->sc_dev.dv_xname);
892: printf("%s: this could be a manufacturing defect in "
893: "your adapter or system\n", sc->sc_dev.dv_xname);
894: printf("%s: attempting to guess media type; you "
895: "should probably consult your vendor\n", sc->sc_dev.dv_xname);
896: }
897:
898: xl_choose_xcvr(sc, 1);
899: }
900:
/*
 * Guess a usable media-option/transceiver combination from the PCI
 * device ID stored in the EEPROM.  Used when the media options
 * register cannot be trusted (see xl_mediacheck()).  If 'verbose'
 * is set, report the guess on the console.
 */
void
xl_choose_xcvr(struct xl_softc *sc, int verbose)
{
	u_int16_t devid;

	/*
	 * Read the device ID from the EEPROM.
	 * This is what's loaded into the PCI device ID register, so it has
	 * to be correct otherwise we wouldn't have gotten this far.
	 */
	xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);

	switch(devid) {
	case TC_DEVICEID_BOOMERANG_10BT:	/* 3c900-TPO */
	case TC_DEVICEID_KRAKATOA_10BT:		/* 3c900B-TPO */
		sc->xl_media = XL_MEDIAOPT_BT;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("%s: guessing 10BaseT transceiver\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_BOOMERANG_10BT_COMBO:	/* 3c900-COMBO */
	case TC_DEVICEID_KRAKATOA_10BT_COMBO:	/* 3c900B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("%s: guessing COMBO (AUI/BNC/TP)\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_KRAKATOA_10BT_TPC:	/* 3c900B-TPC */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			printf("%s: guessing TPC (BNC/TP)\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_CYCLONE_10FL:		/* 3c900B-FL */
		sc->xl_media = XL_MEDIAOPT_10FL;
		sc->xl_xcvr = XL_XCVR_AUI;
		if (verbose)
			printf("%s: guessing 10baseFL\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
	case TC_DEVICEID_HURRICANE_555:		/* 3c555 */
	case TC_DEVICEID_HURRICANE_556:		/* 3c556 */
	case TC_DEVICEID_HURRICANE_556B:	/* 3c556B */
	case TC_DEVICEID_HURRICANE_575A:	/* 3c575TX */
	case TC_DEVICEID_HURRICANE_575B:	/* 3c575B */
	case TC_DEVICEID_HURRICANE_575C:	/* 3c575C */
	case TC_DEVICEID_HURRICANE_656:		/* 3c656 */
	case TC_DEVICEID_HURRICANE_656B:	/* 3c656B */
	case TC_DEVICEID_TORNADO_656C:		/* 3c656C */
	case TC_DEVICEID_TORNADO_10_100BT_920B:	/* 3c920B-EMB */
		sc->xl_media = XL_MEDIAOPT_MII;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			printf("%s: guessing MII\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_BOOMERANG_100BT4:	/* 3c905-T4 */
	case TC_DEVICEID_CYCLONE_10_100BT4:	/* 3c905B-T4 */
		sc->xl_media = XL_MEDIAOPT_BT4;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			printf("%s: guessing 100BaseT4/MII\n", sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_HURRICANE_10_100BT:	/* 3c905B-TX */
	case TC_DEVICEID_HURRICANE_10_100BT_SERV:/* 3c980-TX */
	case TC_DEVICEID_TORNADO_10_100BT_SERV:	/* 3c980C-TX */
	case TC_DEVICEID_HURRICANE_SOHO100TX:	/* 3cSOHO100-TX */
	case TC_DEVICEID_TORNADO_10_100BT:	/* 3c905C-TX */
	case TC_DEVICEID_TORNADO_HOMECONNECT:	/* 3c450-TX */
		sc->xl_media = XL_MEDIAOPT_BTX;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			printf("%s: guessing 10/100 internal\n",
			    sc->sc_dev.dv_xname);
		break;
	case TC_DEVICEID_CYCLONE_10_100_COMBO:	/* 3c905B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			printf("%s: guessing 10/100 plus BNC/AUI\n",
			    sc->sc_dev.dv_xname);
		break;
	default:
		/* Unknown ID: assume plain 10baseT and hope for the best. */
		printf("%s: unknown device ID: %x -- "
		    "defaulting to 10baseT\n", sc->sc_dev.dv_xname, devid);
		sc->xl_media = XL_MEDIAOPT_BT;
		break;
	}
}
991:
992: /*
993: * Initialize the transmit descriptors.
994: */
995: int
996: xl_list_tx_init(struct xl_softc *sc)
997: {
998: struct xl_chain_data *cd;
999: struct xl_list_data *ld;
1000: int i;
1001:
1002: cd = &sc->xl_cdata;
1003: ld = sc->xl_ldata;
1004: for (i = 0; i < XL_TX_LIST_CNT; i++) {
1005: cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
1006: if (i == (XL_TX_LIST_CNT - 1))
1007: cd->xl_tx_chain[i].xl_next = NULL;
1008: else
1009: cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1010: }
1011:
1012: cd->xl_tx_free = &cd->xl_tx_chain[0];
1013: cd->xl_tx_tail = cd->xl_tx_head = NULL;
1014:
1015: return (0);
1016: }
1017:
1018: /*
1019: * Initialize the transmit descriptors.
1020: */
/*
 * Initialize the transmit descriptors for 90xB-class chips: the
 * software chain is a circular doubly-linked list, each entry also
 * recording the bus (DMA) address of its hardware descriptor.
 * Always returns 0.
 */
int
xl_list_tx_init_90xB(struct xl_softc *sc)
{
	struct xl_chain_data *cd;
	struct xl_list_data *ld;
	int i, next, prev;

	cd = &sc->xl_cdata;
	ld = sc->xl_ldata;
	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		/* Successor/predecessor indices, wrapping at the ends. */
		if (i == (XL_TX_LIST_CNT - 1))
			next = 0;
		else
			next = i + 1;
		if (i == 0)
			prev = XL_TX_LIST_CNT - 1;
		else
			prev = i - 1;
		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
		/* Bus address of this descriptor within the list DMA map. */
		cd->xl_tx_chain[i].xl_phys =
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct xl_list_data, xl_tx_list[i]);
		cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[next];
		cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[prev];
	}

	/* Clear the hardware descriptors; mark descriptor 0 as empty. */
	bzero((char *)ld->xl_tx_list, sizeof(struct xl_list) * XL_TX_LIST_CNT);
	ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);

	/* Producer/consumer start at index 1 (0 holds the EMPTY marker). */
	cd->xl_tx_prod = 1;
	cd->xl_tx_cons = 1;
	cd->xl_tx_cnt = 0;

	return (0);
}
1056:
1057: /*
1058: * Initialize the RX descriptors and allocate mbufs for them. Note that
1059: * we arrange the descriptors in a closed ring, so that the last descriptor
1060: * points back to the first.
1061: */
1062: int
1063: xl_list_rx_init(struct xl_softc *sc)
1064: {
1065: struct xl_chain_data *cd;
1066: struct xl_list_data *ld;
1067: int i, n;
1068: bus_addr_t next;
1069:
1070: cd = &sc->xl_cdata;
1071: ld = sc->xl_ldata;
1072:
1073: for (i = 0; i < XL_RX_LIST_CNT; i++) {
1074: cd->xl_rx_chain[i].xl_ptr =
1075: (struct xl_list_onefrag *)&ld->xl_rx_list[i];
1076: if (xl_newbuf(sc, &cd->xl_rx_chain[i]) == ENOBUFS)
1077: return(ENOBUFS);
1078: if (i == (XL_RX_LIST_CNT - 1))
1079: n = 0;
1080: else
1081: n = i + 1;
1082: cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[n];
1083: next = sc->sc_listmap->dm_segs[0].ds_addr +
1084: offsetof(struct xl_list_data, xl_rx_list[n]);
1085: ld->xl_rx_list[i].xl_next = htole32(next);
1086: }
1087:
1088: cd->xl_rx_head = &cd->xl_rx_chain[0];
1089:
1090: return (0);
1091: }
1092:
1093: /*
1094: * Initialize an RX descriptor and attach an MBUF cluster.
1095: */
1096: int
1097: xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c)
1098: {
1099: struct mbuf *m_new = NULL;
1100: bus_dmamap_t map;
1101:
1102: MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1103: if (m_new == NULL)
1104: return (ENOBUFS);
1105:
1106: MCLGET(m_new, M_DONTWAIT);
1107: if (!(m_new->m_flags & M_EXT)) {
1108: m_freem(m_new);
1109: return (ENOBUFS);
1110: }
1111:
1112: m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1113: if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_sparemap,
1114: mtod(m_new, caddr_t), MCLBYTES, NULL, BUS_DMA_NOWAIT) != 0) {
1115: m_freem(m_new);
1116: return (ENOBUFS);
1117: }
1118:
1119: /* sync the old map, and unload it (if necessary) */
1120: if (c->map->dm_nsegs != 0) {
1121: bus_dmamap_sync(sc->sc_dmat, c->map,
1122: 0, c->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1123: bus_dmamap_unload(sc->sc_dmat, c->map);
1124: }
1125:
1126: map = c->map;
1127: c->map = sc->sc_rx_sparemap;
1128: sc->sc_rx_sparemap = map;
1129:
1130: /* Force longword alignment for packet payload. */
1131: m_adj(m_new, ETHER_ALIGN);
1132:
1133: bus_dmamap_sync(sc->sc_dmat, c->map, 0, c->map->dm_mapsize,
1134: BUS_DMASYNC_PREREAD);
1135:
1136: c->xl_mbuf = m_new;
1137: c->xl_ptr->xl_frag.xl_addr =
1138: htole32(c->map->dm_segs[0].ds_addr + ETHER_ALIGN);
1139: c->xl_ptr->xl_frag.xl_len =
1140: htole32(c->map->dm_segs[0].ds_len | XL_LAST_FRAG);
1141: c->xl_ptr->xl_status = htole32(0);
1142:
1143: bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1144: ((caddr_t)c->xl_ptr - sc->sc_listkva), sizeof(struct xl_list),
1145: BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1146:
1147: return (0);
1148: }
1149:
1150: int
1151: xl_rx_resync(struct xl_softc *sc)
1152: {
1153: struct xl_chain_onefrag *pos;
1154: int i;
1155:
1156: pos = sc->xl_cdata.xl_rx_head;
1157:
1158: for (i = 0; i < XL_RX_LIST_CNT; i++) {
1159: bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1160: ((caddr_t)pos->xl_ptr - sc->sc_listkva),
1161: sizeof(struct xl_list),
1162: BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1163:
1164: if (pos->xl_ptr->xl_status)
1165: break;
1166: pos = pos->xl_next;
1167: }
1168:
1169: if (i == XL_RX_LIST_CNT)
1170: return (0);
1171:
1172: sc->xl_cdata.xl_rx_head = pos;
1173:
1174: return (EAGAIN);
1175: }
1176:
1177: /*
1178: * A frame has been uploaded: pass the resulting mbuf chain up to
1179: * the higher level protocols.
1180: */
1181: void
1182: xl_rxeof(struct xl_softc *sc)
1183: {
1184: struct mbuf *m;
1185: struct ifnet *ifp;
1186: struct xl_chain_onefrag *cur_rx;
1187: int total_len = 0, sumflags = 0;
1188: u_int32_t rxstat;
1189:
1190: ifp = &sc->sc_arpcom.ac_if;
1191:
1192: again:
1193:
1194: while ((rxstat = letoh32(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))
1195: != 0) {
1196: cur_rx = sc->xl_cdata.xl_rx_head;
1197: sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
1198: total_len = rxstat & XL_RXSTAT_LENMASK;
1199:
1200: bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1201: ((caddr_t)cur_rx->xl_ptr - sc->sc_listkva),
1202: sizeof(struct xl_list),
1203: BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1204:
1205: /*
1206: * Since we have told the chip to allow large frames,
1207: * we need to trap giant frame errors in software. We allow
1208: * a little more than the normal frame size to account for
1209: * frames with VLAN tags.
1210: */
1211: if (total_len > XL_MAX_FRAMELEN)
1212: rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);
1213:
1214: /*
1215: * If an error occurs, update stats, clear the
1216: * status word and leave the mbuf cluster in place:
1217: * it should simply get re-used next time this descriptor
1218: * comes up in the ring.
1219: */
1220: if (rxstat & XL_RXSTAT_UP_ERROR) {
1221: ifp->if_ierrors++;
1222: cur_rx->xl_ptr->xl_status = htole32(0);
1223: continue;
1224: }
1225:
1226: /*
1227: * If the error bit was not set, the upload complete
1228: * bit should be set which means we have a valid packet.
1229: * If not, something truly strange has happened.
1230: */
1231: if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
1232: printf("%s: bad receive status -- "
1233: "packet dropped\n", sc->sc_dev.dv_xname);
1234: ifp->if_ierrors++;
1235: cur_rx->xl_ptr->xl_status = htole32(0);
1236: continue;
1237: }
1238:
1239: /* No errors; receive the packet. */
1240: m = cur_rx->xl_mbuf;
1241:
1242: /*
1243: * Try to conjure up a new mbuf cluster. If that
1244: * fails, it means we have an out of memory condition and
1245: * should leave the buffer in place and continue. This will
1246: * result in a lost packet, but there's little else we
1247: * can do in this situation.
1248: */
1249: if (xl_newbuf(sc, cur_rx) == ENOBUFS) {
1250: ifp->if_ierrors++;
1251: cur_rx->xl_ptr->xl_status = htole32(0);
1252: continue;
1253: }
1254:
1255: ifp->if_ipackets++;
1256: m->m_pkthdr.rcvif = ifp;
1257: m->m_pkthdr.len = m->m_len = total_len;
1258: #if NBPFILTER > 0
1259: /*
1260: * Handle BPF listeners. Let the BPF user see the packet.
1261: */
1262: if (ifp->if_bpf) {
1263: bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
1264: }
1265: #endif
1266:
1267: if (sc->xl_type == XL_TYPE_905B) {
1268: if (!(rxstat & XL_RXSTAT_IPCKERR) &&
1269: (rxstat & XL_RXSTAT_IPCKOK))
1270: sumflags |= M_IPV4_CSUM_IN_OK;
1271:
1272: if (!(rxstat & XL_RXSTAT_TCPCKERR) &&
1273: (rxstat & XL_RXSTAT_TCPCKOK))
1274: sumflags |= M_TCP_CSUM_IN_OK;
1275:
1276: if (!(rxstat & XL_RXSTAT_UDPCKERR) &&
1277: (rxstat & XL_RXSTAT_UDPCKOK))
1278: sumflags |= M_UDP_CSUM_IN_OK;
1279:
1280: m->m_pkthdr.csum_flags = sumflags;
1281: }
1282:
1283: ether_input_mbuf(ifp, m);
1284: }
1285:
1286: /*
1287: * Handle the 'end of channel' condition. When the upload
1288: * engine hits the end of the RX ring, it will stall. This
1289: * is our cue to flush the RX ring, reload the uplist pointer
1290: * register and unstall the engine.
1291: * XXX This is actually a little goofy. With the ThunderLAN
1292: * chip, you get an interrupt when the receiver hits the end
1293: * of the receive ring, which tells you exactly when you
1294: * you need to reload the ring pointer. Here we have to
1295: * fake it. I'm mad at myself for not being clever enough
1296: * to avoid the use of a goto here.
1297: */
1298: if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
1299: CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
1300: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
1301: xl_wait(sc);
1302: CSR_WRITE_4(sc, XL_UPLIST_PTR,
1303: sc->sc_listmap->dm_segs[0].ds_addr +
1304: offsetof(struct xl_list_data, xl_rx_list[0]));
1305: sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
1306: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
1307: goto again;
1308: }
1309: }
1310:
1311: /*
1312: * A frame was downloaded to the chip. It's safe for us to clean up
1313: * the list buffers.
1314: */
1315: void
1316: xl_txeof(struct xl_softc *sc)
1317: {
1318: struct xl_chain *cur_tx;
1319: struct ifnet *ifp;
1320:
1321: ifp = &sc->sc_arpcom.ac_if;
1322:
1323: /* Clear the timeout timer. */
1324: ifp->if_timer = 0;
1325:
1326: /*
1327: * Go through our tx list and free mbufs for those
1328: * frames that have been uploaded. Note: the 3c905B
1329: * sets a special bit in the status word to let us
1330: * know that a frame has been downloaded, but the
1331: * original 3c900/3c905 adapters don't do that.
1332: * Consequently, we have to use a different test if
1333: * xl_type != XL_TYPE_905B.
1334: */
1335: while (sc->xl_cdata.xl_tx_head != NULL) {
1336: cur_tx = sc->xl_cdata.xl_tx_head;
1337:
1338: bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1339: ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva),
1340: sizeof(struct xl_list),
1341: BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1342:
1343: if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
1344: break;
1345:
1346: sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
1347: ifp->if_opackets++;
1348: if (cur_tx->map->dm_nsegs != 0) {
1349: bus_dmamap_t map = cur_tx->map;
1350:
1351: bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1352: BUS_DMASYNC_POSTWRITE);
1353: bus_dmamap_unload(sc->sc_dmat, map);
1354: }
1355: if (cur_tx->xl_mbuf != NULL) {
1356: m_freem(cur_tx->xl_mbuf);
1357: cur_tx->xl_mbuf = NULL;
1358: }
1359: cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
1360: sc->xl_cdata.xl_tx_free = cur_tx;
1361: }
1362:
1363: if (sc->xl_cdata.xl_tx_head == NULL) {
1364: ifp->if_flags &= ~IFF_OACTIVE;
1365: sc->xl_cdata.xl_tx_tail = NULL;
1366: } else {
1367: if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
1368: !CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
1369: CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1370: sc->sc_listmap->dm_segs[0].ds_addr +
1371: ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr -
1372: sc->sc_listkva));
1373: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1374: }
1375: }
1376: }
1377:
/*
 * TX completion for the 3c90xB: walk the ring from the consumer
 * index towards the producer index, reclaiming every descriptor the
 * chip has marked with XL_TXSTAT_DL_COMPLETE.
 */
void
xl_txeof_90xB(struct xl_softc *sc)
{
	struct xl_chain *cur_tx = NULL;
	struct ifnet *ifp;
	int idx;

	ifp = &sc->sc_arpcom.ac_if;

	idx = sc->xl_cdata.xl_tx_cons;
	while (idx != sc->xl_cdata.xl_tx_prod) {

		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Stop at the first descriptor the chip hasn't finished. */
		if ((cur_tx->xl_ptr->xl_status &
		    htole32(XL_TXSTAT_DL_COMPLETE)) == 0)
			break;

		if (cur_tx->xl_mbuf != NULL) {
			m_freem(cur_tx->xl_mbuf);
			cur_tx->xl_mbuf = NULL;
		}

		if (cur_tx->map->dm_nsegs != 0) {
			bus_dmamap_sync(sc->sc_dmat, cur_tx->map,
			    0, cur_tx->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, cur_tx->map);
		}

		ifp->if_opackets++;

		sc->xl_cdata.xl_tx_cnt--;
		XL_INC(idx, XL_TX_LIST_CNT);
		/* Progress was made; cancel the watchdog. */
		ifp->if_timer = 0;
	}

	sc->xl_cdata.xl_tx_cons = idx;

	/* If the ring was non-empty, allow xl_start_90xB() to run again. */
	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;
}
1419:
1420: /*
1421: * TX 'end of channel' interrupt handler. Actually, we should
1422: * only get a 'TX complete' interrupt if there's a transmit error,
1423: * so this is really TX error handler.
1424: */
1425: void
1426: xl_txeoc(struct xl_softc *sc)
1427: {
1428: u_int8_t txstat;
1429:
1430: while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
1431: if (txstat & XL_TXSTATUS_UNDERRUN ||
1432: txstat & XL_TXSTATUS_JABBER ||
1433: txstat & XL_TXSTATUS_RECLAIM) {
1434: if (txstat != 0x90) {
1435: printf("%s: transmission error: %x\n",
1436: sc->sc_dev.dv_xname, txstat);
1437: }
1438: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
1439: xl_wait(sc);
1440: if (sc->xl_type == XL_TYPE_905B) {
1441: if (sc->xl_cdata.xl_tx_cnt) {
1442: int i;
1443: struct xl_chain *c;
1444:
1445: i = sc->xl_cdata.xl_tx_cons;
1446: c = &sc->xl_cdata.xl_tx_chain[i];
1447: CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1448: c->xl_phys);
1449: CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
1450: }
1451: } else {
1452: if (sc->xl_cdata.xl_tx_head != NULL)
1453: CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1454: sc->sc_listmap->dm_segs[0].ds_addr +
1455: ((caddr_t)sc->xl_cdata.xl_tx_head->xl_ptr -
1456: sc->sc_listkva));
1457: }
1458: /*
1459: * Remember to set this for the
1460: * first generation 3c90X chips.
1461: */
1462: CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
1463: if (txstat & XL_TXSTATUS_UNDERRUN &&
1464: sc->xl_tx_thresh < XL_PACKET_SIZE) {
1465: sc->xl_tx_thresh += XL_MIN_FRAMELEN;
1466: #ifdef notdef
1467: printf("%s: tx underrun, increasing tx start"
1468: " threshold to %d\n", sc->sc_dev.dv_xname,
1469: sc->xl_tx_thresh);
1470: #endif
1471: }
1472: CSR_WRITE_2(sc, XL_COMMAND,
1473: XL_CMD_TX_SET_START|sc->xl_tx_thresh);
1474: if (sc->xl_type == XL_TYPE_905B) {
1475: CSR_WRITE_2(sc, XL_COMMAND,
1476: XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
1477: }
1478: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
1479: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1480: } else {
1481: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
1482: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1483: }
1484: /*
1485: * Write an arbitrary byte to the TX_STATUS register
1486: * to clear this interrupt/error and advance to the next.
1487: */
1488: CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
1489: }
1490: }
1491:
/*
 * Interrupt handler. Services RX/TX completion, TX errors, adapter
 * failure and statistics-overflow events, then kicks the transmitter
 * if frames are still queued. Returns non-zero if any interrupt was
 * claimed.
 */
int
xl_intr(void *arg)
{
	struct xl_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int claimed = 0;

	sc = arg;
	ifp = &sc->sc_arpcom.ac_if;

	/*
	 * An all-ones status read presumably means the hardware is
	 * gone (detached/powered off); don't loop on it.
	 */
	while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS && status != 0xFFFF) {

		claimed = 1;

		/* Acknowledge only the interrupt sources we handle. */
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_INTR_ACK|(status & XL_INTRS));

		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (status & XL_STAT_UP_COMPLETE) {
			int curpkts;

			curpkts = ifp->if_ipackets;
			xl_rxeof(sc);
			/*
			 * Nothing received despite the interrupt: the
			 * RX head may be out of sync with the chip;
			 * resync and retry until the ring is clean.
			 */
			if (curpkts == ifp->if_ipackets) {
				while (xl_rx_resync(sc))
					xl_rxeof(sc);
			}
		}

		if (status & XL_STAT_DOWN_COMPLETE) {
			if (sc->xl_type == XL_TYPE_905B)
				xl_txeof_90xB(sc);
			else
				xl_txeof(sc);
		}

		/* TX complete signals a transmit error; see xl_txeoc(). */
		if (status & XL_STAT_TX_COMPLETE) {
			ifp->if_oerrors++;
			xl_txeoc(sc);
		}

		/* Adapter failure: full reset and reinitialization. */
		if (status & XL_STAT_ADFAIL) {
			xl_reset(sc);
			xl_init(sc);
		}

		/*
		 * Statistics counters overflowing: drain them now,
		 * suppressing the mii_tick/timeout side effects.
		 */
		if (status & XL_STAT_STATSOFLOW) {
			sc->xl_stats_no_timeout = 1;
			xl_stats_update(sc);
			sc->xl_stats_no_timeout = 0;
		}
	}

	/* Push out any frames that queued up while we were busy. */
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		(*ifp->if_start)(ifp);

	return (claimed);
}
1553:
/*
 * Read (and thereby clear) the statistics registers, folding the
 * interesting counters into the interface statistics. Runs
 * periodically from the xl_stsup_tmo timeout and from the
 * STATSOFLOW interrupt path, where xl_stats_no_timeout is set so
 * that neither mii_tick() nor the timeout re-arm happens.
 */
void
xl_stats_update(void *xsc)
{
	struct xl_softc *sc;
	struct ifnet *ifp;
	struct xl_stats xl_stats;
	u_int8_t *p;
	int i;
	struct mii_data *mii = NULL;

	bzero((char *)&xl_stats, sizeof(struct xl_stats));

	sc = xsc;
	ifp = &sc->sc_arpcom.ac_if;
	if (sc->xl_hasmii)
		mii = &sc->sc_mii;

	p = (u_int8_t *)&xl_stats;

	/* Read all the stats registers. */
	XL_SEL_WIN(6);

	/*
	 * The 16 byte-wide window-6 counters are read sequentially into
	 * the xl_stats structure (assumes the struct layout matches the
	 * register layout exactly).
	 */
	for (i = 0; i < 16; i++)
		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);

	ifp->if_ierrors += xl_stats.xl_rx_overrun;

	ifp->if_collisions += xl_stats.xl_tx_multi_collision +
	    xl_stats.xl_tx_single_collision +
	    xl_stats.xl_tx_late_collision;

	/*
	 * Boomerang and cyclone chips have an extra stats counter
	 * in window 4 (BadSSD). We have to read this too in order
	 * to clear out all the stats registers and avoid a statsoflow
	 * interrupt.
	 */
	XL_SEL_WIN(4);
	CSR_READ_1(sc, XL_W4_BADSSD);

	if (mii != NULL && (!sc->xl_stats_no_timeout))
		mii_tick(mii);

	/* Return to window 7 for normal operation. */
	XL_SEL_WIN(7);

	if (!sc->xl_stats_no_timeout)
		timeout_add(&sc->xl_stsup_tmo, hz);
}
1602:
1603: /*
1604: * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1605: * pointers to the fragment pointers.
1606: */
1607: int
1608: xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf *m_head)
1609: {
1610: int error, frag, total_len;
1611: u_int32_t status;
1612: bus_dmamap_t map;
1613:
1614: map = sc->sc_tx_sparemap;
1615:
1616: reload:
1617: error = bus_dmamap_load_mbuf(sc->sc_dmat, map,
1618: m_head, BUS_DMA_NOWAIT);
1619:
1620: if (error && error != EFBIG) {
1621: m_freem(m_head);
1622: return (1);
1623: }
1624:
1625: /*
1626: * Start packing the mbufs in this chain into
1627: * the fragment pointers. Stop when we run out
1628: * of fragments or hit the end of the mbuf chain.
1629: */
1630: for (frag = 0, total_len = 0; frag < map->dm_nsegs; frag++) {
1631: if (frag == XL_MAXFRAGS)
1632: break;
1633: total_len += map->dm_segs[frag].ds_len;
1634: c->xl_ptr->xl_frag[frag].xl_addr =
1635: htole32(map->dm_segs[frag].ds_addr);
1636: c->xl_ptr->xl_frag[frag].xl_len =
1637: htole32(map->dm_segs[frag].ds_len);
1638: }
1639:
1640: /*
1641: * Handle special case: we used up all 63 fragments,
1642: * but we have more mbufs left in the chain. Copy the
1643: * data into an mbuf cluster. Note that we don't
1644: * bother clearing the values in the other fragment
1645: * pointers/counters; it wouldn't gain us anything,
1646: * and would waste cycles.
1647: */
1648: if (error) {
1649: struct mbuf *m_new = NULL;
1650:
1651: MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1652: if (m_new == NULL) {
1653: m_freem(m_head);
1654: return (1);
1655: }
1656: if (m_head->m_pkthdr.len > MHLEN) {
1657: MCLGET(m_new, M_DONTWAIT);
1658: if (!(m_new->m_flags & M_EXT)) {
1659: m_freem(m_new);
1660: m_freem(m_head);
1661: return (1);
1662: }
1663: }
1664: m_copydata(m_head, 0, m_head->m_pkthdr.len,
1665: mtod(m_new, caddr_t));
1666: m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1667: m_freem(m_head);
1668: m_head = m_new;
1669: goto reload;
1670: }
1671:
1672: bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1673: BUS_DMASYNC_PREWRITE);
1674:
1675: if (c->map->dm_nsegs != 0) {
1676: bus_dmamap_sync(sc->sc_dmat, c->map,
1677: 0, c->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1678: bus_dmamap_unload(sc->sc_dmat, c->map);
1679: }
1680:
1681: c->xl_mbuf = m_head;
1682: sc->sc_tx_sparemap = c->map;
1683: c->map = map;
1684: c->xl_ptr->xl_frag[frag - 1].xl_len |= htole32(XL_LAST_FRAG);
1685: c->xl_ptr->xl_status = htole32(total_len);
1686: c->xl_ptr->xl_next = 0;
1687:
1688: if (sc->xl_type == XL_TYPE_905B) {
1689: status = XL_TXSTAT_RND_DEFEAT;
1690:
1691: #ifndef XL905B_TXCSUM_BROKEN
1692: if (m_head->m_pkthdr.csum_flags) {
1693: if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1694: status |= XL_TXSTAT_IPCKSUM;
1695: if (m_head->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
1696: status |= XL_TXSTAT_TCPCKSUM;
1697: if (m_head->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
1698: status |= XL_TXSTAT_UDPCKSUM;
1699: }
1700: #endif
1701: c->xl_ptr->xl_status = htole32(status);
1702: }
1703:
1704: bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1705: offsetof(struct xl_list_data, xl_tx_list[0]),
1706: sizeof(struct xl_list) * XL_TX_LIST_CNT,
1707: BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1708:
1709: return (0);
1710: }
1711:
1712: /*
1713: * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1714: * to the mbuf data regions directly in the transmit lists. We also save a
1715: * copy of the pointers since the transmit list fragment pointers are
1716: * physical addresses.
1717: */
1718: void
1719: xl_start(struct ifnet *ifp)
1720: {
1721: struct xl_softc *sc;
1722: struct mbuf *m_head = NULL;
1723: struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
1724: struct xl_chain *prev_tx;
1725: int error;
1726:
1727: sc = ifp->if_softc;
1728:
1729: /*
1730: * Check for an available queue slot. If there are none,
1731: * punt.
1732: */
1733: if (sc->xl_cdata.xl_tx_free == NULL) {
1734: xl_txeoc(sc);
1735: xl_txeof(sc);
1736: if (sc->xl_cdata.xl_tx_free == NULL) {
1737: ifp->if_flags |= IFF_OACTIVE;
1738: return;
1739: }
1740: }
1741:
1742: start_tx = sc->xl_cdata.xl_tx_free;
1743:
1744: while (sc->xl_cdata.xl_tx_free != NULL) {
1745: IFQ_DEQUEUE(&ifp->if_snd, m_head);
1746: if (m_head == NULL)
1747: break;
1748:
1749: /* Pick a descriptor off the free list. */
1750: prev_tx = cur_tx;
1751: cur_tx = sc->xl_cdata.xl_tx_free;
1752:
1753: /* Pack the data into the descriptor. */
1754: error = xl_encap(sc, cur_tx, m_head);
1755: if (error) {
1756: cur_tx = prev_tx;
1757: continue;
1758: }
1759:
1760: sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
1761: cur_tx->xl_next = NULL;
1762:
1763: /* Chain it together. */
1764: if (prev != NULL) {
1765: prev->xl_next = cur_tx;
1766: prev->xl_ptr->xl_next =
1767: sc->sc_listmap->dm_segs[0].ds_addr +
1768: ((caddr_t)cur_tx->xl_ptr - sc->sc_listkva);
1769:
1770: }
1771: prev = cur_tx;
1772:
1773: #if NBPFILTER > 0
1774: /*
1775: * If there's a BPF listener, bounce a copy of this frame
1776: * to him.
1777: */
1778: if (ifp->if_bpf)
1779: bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf,
1780: BPF_DIRECTION_OUT);
1781: #endif
1782: }
1783:
1784: /*
1785: * If there are no packets queued, bail.
1786: */
1787: if (cur_tx == NULL)
1788: return;
1789:
1790: /*
1791: * Place the request for the upload interrupt
1792: * in the last descriptor in the chain. This way, if
1793: * we're chaining several packets at once, we'll only
1794: * get an interrupt once for the whole chain rather than
1795: * once for each packet.
1796: */
1797: cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);
1798:
1799: /*
1800: * Queue the packets. If the TX channel is clear, update
1801: * the downlist pointer register.
1802: */
1803: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
1804: xl_wait(sc);
1805:
1806: if (sc->xl_cdata.xl_tx_head != NULL) {
1807: sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
1808: sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
1809: sc->sc_listmap->dm_segs[0].ds_addr +
1810: ((caddr_t)start_tx->xl_ptr - sc->sc_listkva);
1811: sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &=
1812: htole32(~XL_TXSTAT_DL_INTR);
1813: sc->xl_cdata.xl_tx_tail = cur_tx;
1814: } else {
1815: sc->xl_cdata.xl_tx_head = start_tx;
1816: sc->xl_cdata.xl_tx_tail = cur_tx;
1817: }
1818: if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
1819: CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
1820: sc->sc_listmap->dm_segs[0].ds_addr +
1821: ((caddr_t)start_tx->xl_ptr - sc->sc_listkva));
1822:
1823: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
1824:
1825: XL_SEL_WIN(7);
1826:
1827: /*
1828: * Set a timeout in case the chip goes out to lunch.
1829: */
1830: ifp->if_timer = 5;
1831:
1832: /*
1833: * XXX Under certain conditions, usually on slower machines
1834: * where interrupts may be dropped, it's possible for the
1835: * adapter to chew up all the buffers in the receive ring
1836: * and stall, without us being able to do anything about it.
1837: * To guard against this, we need to make a pass over the
1838: * RX queue to make sure there aren't any packets pending.
1839: * Doing it here means we can flush the receive ring at the
1840: * same time the chip is DMAing the transmit descriptors we
1841: * just gave it.
1842: *
1843: * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
1844: * nature of their chips in all their marketing literature;
1845: * we may as well take advantage of it. :)
1846: */
1847: xl_rxeof(sc);
1848: }
1849:
/*
 * Transmit routine for the 3c90xB: fills ring slots at the producer
 * index (keeping a few slots in reserve), links them together with
 * physical next pointers, and finally stitches the new chain onto
 * the ring so the chip's down-poll picks it up.
 */
void
xl_start_90xB(struct ifnet *ifp)
{
	struct xl_softc *sc;
	struct mbuf *m_head = NULL;
	struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain *prev_tx;
	int error, idx;

	sc = ifp->if_softc;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	idx = sc->xl_cdata.xl_tx_prod;
	start_tx = &sc->xl_cdata.xl_tx_chain[idx];

	while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) {

		/* Keep a small reserve of free descriptors. */
		if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		prev_tx = cur_tx;
		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			/* Encap failed (mbuf already freed); reuse slot. */
			cur_tx = prev_tx;
			continue;
		}

		/* Chain it together. */
		if (prev != NULL)
			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
		prev = cur_tx;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->xl_mbuf,
			    BPF_DIRECTION_OUT);
#endif

		XL_INC(idx, XL_TX_LIST_CNT);
		sc->xl_cdata.xl_tx_cnt++;
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR);

	/* Start transmission */
	sc->xl_cdata.xl_tx_prod = idx;
	/* Link the previous ring entry to the new chain's first slot. */
	start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
1931:
/*
 * Initialize the adapter and bring the interface up: stop pending
 * I/O, reset the transmitter (and receiver when no MII PHY is
 * present), program the station address and RX filter, set up the
 * RX/TX descriptor lists, then enable the receiver, transmitter and
 * interrupts. Runs at splnet; re-arms the stats timeout on exit.
 */
void
xl_init(void *xsc)
{
	struct xl_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s, i;
	u_int16_t rxfilt = 0;
	struct mii_data *mii = NULL;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	xl_stop(sc);

	if (sc->xl_hasmii)
		mii = &sc->sc_mii;

	/* With no PHY, reset the receiver here too. */
	if (mii == NULL) {
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
		xl_wait(sc);
	}
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
	DELAY(10000);

	/* Init our MAC address */
	XL_SEL_WIN(2);
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
		    sc->sc_arpcom.ac_enaddr[i]);
	}

	/* Clear the station mask. */
	for (i = 0; i < 3; i++)
		CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
#ifdef notdef
	/* Reset TX and RX. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif
	/* Init circular RX list. */
	if (xl_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->sc_dev.dv_xname);
		xl_stop(sc);
		splx(s);
		return;
	}

	/* Init TX descriptors. */
	if (sc->xl_type == XL_TYPE_905B)
		xl_list_tx_init_90xB(sc);
	else
		xl_list_tx_init(sc);

	/*
	 * Set the TX freethresh value.
	 * Note that this has no effect on 3c905B "cyclone"
	 * cards but is required for 3c900/3c905 "boomerang"
	 * cards in order to enable the download engine.
	 */
	CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	sc->xl_tx_thresh = XL_MIN_FRAMELEN;
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);

	/*
	 * If this is a 3c905B, also set the tx reclaim threshold.
	 * This helps cut down on the number of tx reclaim errors
	 * that could happen on a busy network. The chip multiplies
	 * the register value by 16 to obtain the actual threshold
	 * in bytes, so we divide by 16 when setting the value here.
	 * The existing threshold value can be examined by reading
	 * the register at offset 9 in window 5.
	 */
	if (sc->xl_type == XL_TYPE_905B) {
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
	}

	/* Set RX filter bits. */
	XL_SEL_WIN(5);
	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);

	/* Set the individual bit to receive frames for this host only. */
	rxfilt |= XL_RXFILTER_INDIVIDUAL;

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);

	/* Set promiscuous mode. */
	xl_setpromisc(sc);

	/* Re-read: xl_setpromisc() may have changed the filter. */
	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		rxfilt |= XL_RXFILTER_BROADCAST;
	else
		rxfilt &= ~XL_RXFILTER_BROADCAST;

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);

	/*
	 * Program the multicast filter, if necessary.
	 */
	if (sc->xl_type == XL_TYPE_905B)
		xl_setmulti_hash(sc);
	else
		xl_setmulti(sc);

	/*
	 * Load the address of the RX list. We have to
	 * stall the upload engine before we can manipulate
	 * the uplist pointer register, then unstall it when
	 * we're finished. We also have to wait for the
	 * stall command to complete before proceeding.
	 * Note that we have to do this after any RX resets
	 * have completed since the uplist register is cleared
	 * by a reset.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
	xl_wait(sc);
	CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct xl_list_data, xl_rx_list[0]));
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
	xl_wait(sc);

	if (sc->xl_type == XL_TYPE_905B) {
		/* Set polling interval */
		CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
		/* Load the address of the TX list */
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
		xl_wait(sc);
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct xl_list_data, xl_tx_list[0]));
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		xl_wait(sc);
	}

	/*
	 * If the coax transceiver is on, make sure to enable
	 * the DC-DC converter.
	 */
	XL_SEL_WIN(3);
	if (sc->xl_xcvr == XL_XCVR_COAX)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
	else
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);

	/*
	 * increase packet size to allow reception of 802.1q or ISL packets.
	 * For the 3c90x chip, set the 'allow large packets' bit in the MAC
	 * control register. For 3c90xB/C chips, use the RX packet size
	 * register.
	 */

	if (sc->xl_type == XL_TYPE_905B)
		CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
	else {
		u_int8_t macctl;
		macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
		macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
	}

	/* Clear out the stats counters. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	sc->xl_stats_no_timeout = 1;
	xl_stats_update(sc);
	sc->xl_stats_no_timeout = 0;
	XL_SEL_WIN(4);
	CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);

	if (sc->intr_ack)
		(*sc->intr_ack)(sc);

	/* Set the RX early threshold */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
	CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
	xl_wait(sc);

	/* Restore state of BMCR */
	if (mii != NULL)
		mii_mediachg(mii);

	/* Select window 7 for normal operations. */
	XL_SEL_WIN(7);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	/* Re-arm the periodic statistics timeout. */
	timeout_add(&sc->xl_stsup_tmo, hz);
}
2148:
2149: /*
2150: * Set media options.
2151: */
2152: int
2153: xl_ifmedia_upd(struct ifnet *ifp)
2154: {
2155: struct xl_softc *sc;
2156: struct ifmedia *ifm = NULL;
2157: struct mii_data *mii = NULL;
2158:
2159: sc = ifp->if_softc;
2160:
2161: if (sc->xl_hasmii)
2162: mii = &sc->sc_mii;
2163: if (mii == NULL)
2164: ifm = &sc->ifmedia;
2165: else
2166: ifm = &mii->mii_media;
2167:
2168: switch(IFM_SUBTYPE(ifm->ifm_media)) {
2169: case IFM_100_FX:
2170: case IFM_10_FL:
2171: case IFM_10_2:
2172: case IFM_10_5:
2173: xl_setmode(sc, ifm->ifm_media);
2174: return (0);
2175: break;
2176: default:
2177: break;
2178: }
2179:
2180: if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
2181: || sc->xl_media & XL_MEDIAOPT_BT4) {
2182: xl_init(sc);
2183: } else {
2184: xl_setmode(sc, ifm->ifm_media);
2185: }
2186:
2187: return (0);
2188: }
2189:
2190: /*
2191: * Report current media status.
2192: */
2193: void
2194: xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2195: {
2196: struct xl_softc *sc;
2197: u_int32_t icfg;
2198: u_int16_t status = 0;
2199: struct mii_data *mii = NULL;
2200:
2201: sc = ifp->if_softc;
2202: if (sc->xl_hasmii != 0)
2203: mii = &sc->sc_mii;
2204:
2205: XL_SEL_WIN(4);
2206: status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
2207:
2208: XL_SEL_WIN(3);
2209: icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
2210: icfg >>= XL_ICFG_CONNECTOR_BITS;
2211:
2212: ifmr->ifm_active = IFM_ETHER;
2213: ifmr->ifm_status = IFM_AVALID;
2214:
2215: if ((status & XL_MEDIASTAT_CARRIER) == 0)
2216: ifmr->ifm_status |= IFM_ACTIVE;
2217:
2218: switch(icfg) {
2219: case XL_XCVR_10BT:
2220: ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2221: if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
2222: ifmr->ifm_active |= IFM_FDX;
2223: else
2224: ifmr->ifm_active |= IFM_HDX;
2225: break;
2226: case XL_XCVR_AUI:
2227: if (sc->xl_type == XL_TYPE_905B &&
2228: sc->xl_media == XL_MEDIAOPT_10FL) {
2229: ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
2230: if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
2231: ifmr->ifm_active |= IFM_FDX;
2232: else
2233: ifmr->ifm_active |= IFM_FDX;
2234: } else
2235: ifmr->ifm_active = IFM_ETHER|IFM_10_5;
2236: break;
2237: case XL_XCVR_COAX:
2238: ifmr->ifm_active = IFM_ETHER|IFM_10_2;
2239: break;
2240: /*
2241: * XXX MII and BTX/AUTO should be separate cases.
2242: */
2243:
2244: case XL_XCVR_100BTX:
2245: case XL_XCVR_AUTO:
2246: case XL_XCVR_MII:
2247: if (mii != NULL) {
2248: mii_pollstat(mii);
2249: ifmr->ifm_active = mii->mii_media_active;
2250: ifmr->ifm_status = mii->mii_media_status;
2251: }
2252: break;
2253: case XL_XCVR_100BFX:
2254: ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
2255: break;
2256: default:
2257: printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname, icfg);
2258: break;
2259: }
2260: }
2261:
/*
 * Handle interface ioctls: address assignment, MTU and flag changes,
 * multicast list updates, and media get/set.  All hardware access
 * happens at splnet().
 */
int
xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct xl_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;
	struct mii_data *mii = NULL;

	s = splnet();

	/* Generic ethernet handling first; a positive return is an error. */
	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) {
		splx(s);
		return error;
	}

	switch(command) {
	case SIOCSIFADDR:
		/* Bring the interface up before assigning the address. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			xl_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif /* INET */
		break;

	case SIOCSIFMTU:
		/* Only standard ethernet MTUs are supported. */
		if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		/* Window 5 holds the RX filter register. */
		XL_SEL_WIN(5);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC bit changed while running,
			 * just reprogram the RX filter instead of doing
			 * a full reinitialization.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->xl_if_flags) &
			     IFF_PROMISC) {
				xl_setpromisc(sc);
				XL_SEL_WIN(7);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					xl_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				xl_stop(sc);
		}
		/* Remember the flags for the next delta comparison. */
		sc->xl_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.  3c90xB parts have a real
			 * hash filter; older chips do not.
			 */
			if (ifp->if_flags & IFF_RUNNING) {
				if (sc->xl_type == XL_TYPE_905B)
					xl_setmulti_hash(sc);
				else
					xl_setmulti(sc);
			}
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Route media ioctls to the MII when a PHY is attached. */
		if (sc->xl_hasmii != 0)
			mii = &sc->sc_mii;
		if (mii == NULL)
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->ifmedia, command);
		else
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);

	return (error);
}
2354:
2355: void
2356: xl_watchdog(struct ifnet *ifp)
2357: {
2358: struct xl_softc *sc;
2359: u_int16_t status = 0;
2360:
2361: sc = ifp->if_softc;
2362:
2363: ifp->if_oerrors++;
2364: XL_SEL_WIN(4);
2365: status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
2366: printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2367:
2368: if (status & XL_MEDIASTAT_CARRIER)
2369: printf("%s: no carrier - transceiver cable problem?\n",
2370: sc->sc_dev.dv_xname);
2371: xl_txeoc(sc);
2372: xl_txeof(sc);
2373: xl_rxeof(sc);
2374: xl_reset(sc);
2375: xl_init(sc);
2376:
2377: if (!IFQ_IS_EMPTY(&ifp->if_snd))
2378: (*ifp->if_start)(ifp);
2379: }
2380:
2381: void
2382: xl_freetxrx(struct xl_softc *sc)
2383: {
2384: bus_dmamap_t map;
2385: int i;
2386:
2387: /*
2388: * Free data in the RX lists.
2389: */
2390: for (i = 0; i < XL_RX_LIST_CNT; i++) {
2391: if (sc->xl_cdata.xl_rx_chain[i].map->dm_nsegs != 0) {
2392: map = sc->xl_cdata.xl_rx_chain[i].map;
2393:
2394: bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2395: BUS_DMASYNC_POSTREAD);
2396: bus_dmamap_unload(sc->sc_dmat, map);
2397: }
2398: if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
2399: m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
2400: sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
2401: }
2402: }
2403: bzero((char *)&sc->xl_ldata->xl_rx_list,
2404: sizeof(sc->xl_ldata->xl_rx_list));
2405: /*
2406: * Free the TX list buffers.
2407: */
2408: for (i = 0; i < XL_TX_LIST_CNT; i++) {
2409: if (sc->xl_cdata.xl_tx_chain[i].map->dm_nsegs != 0) {
2410: map = sc->xl_cdata.xl_tx_chain[i].map;
2411:
2412: bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2413: BUS_DMASYNC_POSTWRITE);
2414: bus_dmamap_unload(sc->sc_dmat, map);
2415: }
2416: if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
2417: m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
2418: sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
2419: }
2420: }
2421: bzero((char *)&sc->xl_ldata->xl_tx_list,
2422: sizeof(sc->xl_ldata->xl_tx_list));
2423: }
2424:
2425: /*
2426: * Stop the adapter and free any mbufs allocated to the
2427: * RX and TX lists.
2428: */
2429: void
2430: xl_stop(struct xl_softc *sc)
2431: {
2432: struct ifnet *ifp;
2433:
2434: ifp = &sc->sc_arpcom.ac_if;
2435: ifp->if_timer = 0;
2436:
2437: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
2438: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
2439: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
2440: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
2441: xl_wait(sc);
2442: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
2443: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
2444: DELAY(800);
2445:
2446: #ifdef foo
2447: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
2448: xl_wait(sc);
2449: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
2450: xl_wait(sc);
2451: #endif
2452:
2453: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
2454: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
2455: CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
2456:
2457: if (sc->intr_ack)
2458: (*sc->intr_ack)(sc);
2459:
2460: /* Stop the stats updater. */
2461: timeout_del(&sc->xl_stsup_tmo);
2462:
2463: ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2464:
2465: xl_freetxrx(sc);
2466: }
2467:
/*
 * Bus-independent attach: read the station address from the EEPROM,
 * allocate and map the DMA descriptor rings, determine the chip
 * generation and media options, and register the network interface.
 * Called by the PCI/CardBus front ends once bus resources are mapped.
 *
 * NOTE(review): the early error returns below leak previously
 * allocated DMA resources — confirm whether that is acceptable for
 * a failed attach in this driver.
 */
void
xl_attach(struct xl_softc *sc)
{
	u_int8_t enaddr[ETHER_ADDR_LEN];
	u_int16_t xcvr[2];
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i, media = IFM_ETHER|IFM_100_TX|IFM_FDX;
	struct ifmedia *ifm;

	i = splnet();
	xl_reset(sc);
	splx(i);

	/*
	 * Get station address from the EEPROM.
	 */
	if (xl_read_eeprom(sc, (caddr_t)&enaddr, XL_EE_OEM_ADR0, 3, 1)) {
		printf("\n%s: failed to read station address\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	bcopy(enaddr, (char *)&sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/*
	 * Allocate, map and load one contiguous chunk of DMA-able
	 * memory that holds both the RX and TX descriptor lists.
	 */
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct xl_list_data),
	    PAGE_SIZE, 0, sc->sc_listseg, 1, &sc->sc_listnseg,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't alloc list mem\n");
		return;
	}
	if (bus_dmamem_map(sc->sc_dmat, sc->sc_listseg, sc->sc_listnseg,
	    sizeof(struct xl_list_data), &sc->sc_listkva,
	    BUS_DMA_NOWAIT) != 0) {
		printf(": can't map list mem\n");
		return;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct xl_list_data), 1,
	    sizeof(struct xl_list_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_listmap) != 0) {
		printf(": can't alloc list map\n");
		return;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, sc->sc_listkva,
	    sizeof(struct xl_list_data), NULL, BUS_DMA_NOWAIT) != 0) {
		printf(": can't load list map\n");
		return;
	}
	sc->xl_ldata = (struct xl_list_data *)sc->sc_listkva;
	bzero(sc->xl_ldata, sizeof(struct xl_list_data));

	/* One single-segment map per RX descriptor, plus a spare. */
	for (i = 0; i < XL_RX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT,
		    &sc->xl_cdata.xl_rx_chain[i].map) != 0) {
			printf(": can't create rx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rx_sparemap) != 0) {
		printf(": can't create rx spare map\n");
		return;
	}

	/* Multi-segment maps for TX (up to XL_TX_LIST_CNT - 3 frags). */
	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    XL_TX_LIST_CNT - 3, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->xl_cdata.xl_tx_chain[i].map) != 0) {
			printf(": can't create tx map\n");
			return;
		}
	}
	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, XL_TX_LIST_CNT - 3,
	    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_tx_sparemap) != 0) {
		printf(": can't create tx spare map\n");
		return;
	}

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Some boards need the LED and/or MII power polarity inverted;
	 * flip the corresponding bits in window 2, register 12.
	 */
	if (sc->xl_flags & (XL_FLAG_INVERT_LED_PWR|XL_FLAG_INVERT_MII_PWR)) {
		u_int16_t n;

		XL_SEL_WIN(2);
		n = CSR_READ_2(sc, 12);

		if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR)
			n |= 0x0010;

		if (sc->xl_flags & XL_FLAG_INVERT_MII_PWR)
			n |= 0x4000;

		CSR_WRITE_2(sc, 12, n);
	}

	/*
	 * Figure out the card type. 3c905B adapters have the
	 * 'supportsNoTxLength' bit set in the capabilities
	 * word in the EEPROM.
	 * Note: my 3c575C cardbus card lies. It returns a value
	 * of 0x1578 for its capabilities word, which is somewhat
	 * nonsensical. Another way to distinguish a 3c90x chip
	 * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
	 * bit. This will only be set for 3c90x boomerage chips.
	 */
	xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
	if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
	    !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
		sc->xl_type = XL_TYPE_905B;
	else
		sc->xl_type = XL_TYPE_90X;

	/* Periodic statistics updater (armed from xl_init). */
	timeout_set(&sc->xl_stsup_tmo, xl_stats_update, sc);

	/* Fill in the ifnet and hook up our entry points. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xl_ioctl;
	if (sc->xl_type == XL_TYPE_905B)
		ifp->if_start = xl_start_90xB;
	else
		ifp->if_start = xl_start;
	ifp->if_watchdog = xl_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifndef XL905B_TXCSUM_BROKEN
	ifp->if_capabilities |= IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|
	    IFCAP_CSUM_UDPv4;
#endif

	/* Read the supported media options from window 3. */
	XL_SEL_WIN(3);
	sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);

	/* Extract the default transceiver from the EEPROM config words. */
	xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
	sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
	sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
	sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;

	xl_mediacheck(sc);

	/* Attach an MII bus if the board has a PHY-capable media option. */
	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
	    || sc->xl_media & XL_MEDIAOPT_BT4) {
		ifmedia_init(&sc->sc_mii.mii_media, 0,
		    xl_ifmedia_upd, xl_ifmedia_sts);
		sc->xl_hasmii = 1;
		sc->sc_mii.mii_ifp = ifp;
		sc->sc_mii.mii_readreg = xl_miibus_readreg;
		sc->sc_mii.mii_writereg = xl_miibus_writereg;
		sc->sc_mii.mii_statchg = xl_miibus_statchg;
		xl_setcfg(sc);
		mii_attach((struct device *)sc, &sc->sc_mii, 0xffffffff,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);

		/* No PHY found: fall back to a dummy "none" media entry. */
		if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
			ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE,
			    0, NULL);
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
		}
		else {
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
		}
		ifm = &sc->sc_mii.mii_media;
	}
	else {
		ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);
		sc->xl_hasmii = 0;
		ifm = &sc->ifmedia;
	}

	/*
	 * Sanity check. If the user has selected "auto" and this isn't
	 * a 10/100 card of some kind, we need to force the transceiver
	 * type to something sane.
	 */
	if (sc->xl_xcvr == XL_XCVR_AUTO) {
		xl_choose_xcvr(sc, 0);
		i = splnet();
		xl_reset(sc);
		splx(i);
	}

	/* Register every media type the hardware claims to support. */
	if (sc->xl_media & XL_MEDIAOPT_BT) {
		ifmedia_add(ifm, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
		if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
			ifmedia_add(ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	}

	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
		/*
		 * Check for a 10baseFL board in disguise.
		 */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL, 0, NULL);
			ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_HDX,
			    0, NULL);
			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
				ifmedia_add(ifm,
				    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
		} else {
			ifmedia_add(ifm, IFM_ETHER|IFM_10_5, 0, NULL);
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BNC) {
		ifmedia_add(ifm, IFM_ETHER|IFM_10_2, 0, NULL);
	}

	if (sc->xl_media & XL_MEDIAOPT_BFX) {
		ifp->if_baudrate = 100000000;
		ifmedia_add(ifm, IFM_ETHER|IFM_100_FX, 0, NULL);
	}

	/* Choose a default media. */
	switch(sc->xl_xcvr) {
	case XL_XCVR_10BT:
		media = IFM_ETHER|IFM_10_T;
		xl_setmode(sc, media);
		break;
	case XL_XCVR_AUI:
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			media = IFM_ETHER|IFM_10_FL;
			xl_setmode(sc, media);
		} else {
			media = IFM_ETHER|IFM_10_5;
			xl_setmode(sc, media);
		}
		break;
	case XL_XCVR_COAX:
		media = IFM_ETHER|IFM_10_2;
		xl_setmode(sc, media);
		break;
	case XL_XCVR_AUTO:
	case XL_XCVR_100BTX:
	case XL_XCVR_MII:
		/* Chosen by miibus */
		break;
	case XL_XCVR_100BFX:
		media = IFM_ETHER|IFM_100_FX;
		xl_setmode(sc, media);
		break;
	default:
		printf("%s: unknown XCVR type: %d\n", sc->sc_dev.dv_xname,
		    sc->xl_xcvr);
		/*
		 * This will probably be wrong, but it prevents
		 * the ifmedia code from panicking.
		 */
		media = IFM_ETHER | IFM_10_T;
		break;
	}

	if (sc->xl_hasmii == 0)
		ifmedia_set(&sc->ifmedia, media);

	/* Some boards need the transceiver power forced off at attach. */
	if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
		XL_SEL_WIN(0);
		CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
	}

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	/* Quiesce the chip at shutdown and across suspend/resume. */
	sc->sc_sdhook = shutdownhook_establish(xl_shutdown, sc);
	sc->sc_pwrhook = powerhook_establish(xl_power, sc);
}
2742:
/*
 * Shutdown hook: reset and stop the chip so it is quiescent
 * (no further DMA) before the system reboots.
 */
void
xl_shutdown(void *v)
{
	struct xl_softc *sc = v;

	xl_reset(sc);
	xl_stop(sc);
}
2751:
2752: struct cfdriver xl_cd = {
2753: 0, "xl", DV_IFNET
2754: };
/* CVSweb (web-viewer scrape residue, not part of the source) */