Annotation of sys/dev/pci/if_tl.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: if_tl.c,v 1.43 2007/05/08 21:19:13 deraadt Exp $ */
2:
3: /*
4: * Copyright (c) 1997, 1998
5: * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
6: *
7: * Redistribution and use in source and binary forms, with or without
8: * modification, are permitted provided that the following conditions
9: * are met:
10: * 1. Redistributions of source code must retain the above copyright
11: * notice, this list of conditions and the following disclaimer.
12: * 2. Redistributions in binary form must reproduce the above copyright
13: * notice, this list of conditions and the following disclaimer in the
14: * documentation and/or other materials provided with the distribution.
15: * 3. All advertising materials mentioning features or use of this software
16: * must display the following acknowledgement:
17: * This product includes software developed by Bill Paul.
18: * 4. Neither the name of the author nor the names of any co-contributors
19: * may be used to endorse or promote products derived from this software
20: * without specific prior written permission.
21: *
22: * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25: * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32: * THE POSSIBILITY OF SUCH DAMAGE.
33: *
34: * $FreeBSD: src/sys/pci/if_tl.c,v 1.64 2001/02/06 10:11:48 phk Exp $
35: */
36:
37: /*
38: * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x.
39: * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller,
40: * the National Semiconductor DP83840A physical interface and the
41: * Microchip Technology 24Cxx series serial EEPROM.
42: *
43: * Written using the following four documents:
44: *
45: * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com)
46: * National Semiconductor DP83840A data sheet (www.national.com)
47: * Microchip Technology 24C02C data sheet (www.microchip.com)
48: * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com)
49: *
50: * Written by Bill Paul <wpaul@ctr.columbia.edu>
51: * Electrical Engineering Department
52: * Columbia University, New York City
53: */
54:
55: /*
56: * Some notes about the ThunderLAN:
57: *
58: * The ThunderLAN controller is a single chip containing PCI controller
59: * logic, approximately 3K of on-board SRAM, a LAN controller, and media
60: * independent interface (MII) bus. The MII allows the ThunderLAN chip to
61: * control up to 32 different physical interfaces (PHYs). The ThunderLAN
62: * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller
63: * to act as a complete ethernet interface.
64: *
65: * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards
66: * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec
67: * in full or half duplex. Some of the Compaq Deskpro machines use a
68: * Level 1 LXT970 PHY with the same capabilities. Certain Olicom adapters
69: * use a Micro Linear ML6692 100BaseTX only PHY, which can be used in
70: * concert with the ThunderLAN's internal PHY to provide full 10/100
71: * support. This is cheaper than using a standalone external PHY for both
72: * 10/100 modes and letting the ThunderLAN's internal PHY go to waste.
73: * A serial EEPROM is also attached to the ThunderLAN chip to provide
74: * power-up default register settings and for storing the adapter's
75: * station address. Although not supported by this driver, the ThunderLAN
76: * chip can also be connected to token ring PHYs.
77: *
78: * The ThunderLAN has a set of registers which can be used to issue
79: * commands, acknowledge interrupts, and to manipulate other internal
80: * registers on its DIO bus. The primary registers can be accessed
81: * using either programmed I/O (inb/outb) or via PCI memory mapping,
82: * depending on how the card is configured during the PCI probing
83: * phase. It is even possible to have both PIO and memory mapped
84: * access turned on at the same time.
85: *
86: * Frame reception and transmission with the ThunderLAN chip is done
87: * using frame 'lists.' A list structure looks more or less like this:
88: *
89: * struct tl_frag {
90: * u_int32_t fragment_address;
91: * u_int32_t fragment_size;
92: * };
93: * struct tl_list {
94: * u_int32_t forward_pointer;
95: * u_int16_t cstat;
96: * u_int16_t frame_size;
97: * struct tl_frag fragments[10];
98: * };
99: *
100: * The forward pointer in the list header can be either a 0 or the address
101: * of another list, which allows several lists to be linked together. Each
102: * list contains up to 10 fragment descriptors. This means the chip allows
103: * ethernet frames to be broken up into up to 10 chunks for transfer to
104: * and from the SRAM. Note that the forward pointer and fragment buffer
105: * addresses are physical memory addresses, not virtual. Note also that
106: * a single ethernet frame can not span lists: if the host wants to
107: * transmit a frame and the frame data is split up over more than 10
108: * buffers, the frame has to collapsed before it can be transmitted.
109: *
110: * To receive frames, the driver sets up a number of lists and populates
111: * the fragment descriptors, then it sends an RX GO command to the chip.
112: * When a frame is received, the chip will DMA it into the memory regions
113: * specified by the fragment descriptors and then trigger an RX 'end of
114: * frame interrupt' when done. The driver may choose to use only one
115:  * fragment per list; this may result in slightly less efficient use
116: * of memory in exchange for improving performance.
117: *
118: * To transmit frames, the driver again sets up lists and fragment
119: * descriptors, only this time the buffers contain frame data that
120: * is to be DMA'ed into the chip instead of out of it. Once the chip
121: * has transferred the data into its on-board SRAM, it will trigger a
122: * TX 'end of frame' interrupt. It will also generate an 'end of channel'
123: * interrupt when it reaches the end of the list.
124: */
125:
126: /*
127: * Some notes about this driver:
128: *
129: * The ThunderLAN chip provides a couple of different ways to organize
130: * reception, transmission and interrupt handling. The simplest approach
131: * is to use one list each for transmission and reception. In this mode,
132: * the ThunderLAN will generate two interrupts for every received frame
133: * (one RX EOF and one RX EOC) and two for each transmitted frame (one
134: * TX EOF and one TX EOC). This may make the driver simpler but it hurts
135: * performance to have to handle so many interrupts.
136: *
137: * Initially I wanted to create a circular list of receive buffers so
138: * that the ThunderLAN chip would think there was an infinitely long
139: * receive channel and never deliver an RXEOC interrupt. However this
140: * doesn't work correctly under heavy load: while the manual says the
141: * chip will trigger an RXEOF interrupt each time a frame is copied into
142: * memory, you can't count on the chip waiting around for you to acknowledge
143: * the interrupt before it starts trying to DMA the next frame. The result
144: * is that the chip might traverse the entire circular list and then wrap
145: * around before you have a chance to do anything about it. Consequently,
146: * the receive list is terminated (with a 0 in the forward pointer in the
147: * last element). Each time an RXEOF interrupt arrives, the used list
148: * is shifted to the end of the list. This gives the appearance of an
149: * infinitely large RX chain so long as the driver doesn't fall behind
150: * the chip and allow all of the lists to be filled up.
151: *
152: * If all the lists are filled, the adapter will deliver an RX 'end of
153: * channel' interrupt when it hits the 0 forward pointer at the end of
154: * the chain. The RXEOC handler then cleans out the RX chain and resets
155: * the list head pointer in the ch_parm register and restarts the receiver.
156: *
157: * For frame transmission, it is possible to program the ThunderLAN's
158: * transmit interrupt threshold so that the chip can acknowledge multiple
159: * lists with only a single TX EOF interrupt. This allows the driver to
160: * queue several frames in one shot, and only have to handle a total
161: * two interrupts (one TX EOF and one TX EOC) no matter how many frames
162: * are transmitted. Frame transmission is done directly out of the
163: * mbufs passed to the tl_start() routine via the interface send queue.
164: * The driver simply sets up the fragment descriptors in the transmit
165: * lists to point to the mbuf data regions and sends a TX GO command.
166: *
167: * Note that since the RX and TX lists themselves are always used
168:  * only by the driver, they are malloc()ed once at driver initialization
169: * time and never free()ed.
170: *
171: * Also, in order to remain as platform independent as possible, this
172: * driver uses memory mapped register access to manipulate the card
173: * as opposed to programmed I/O. This avoids the use of the inb/outb
174: * (and related) instructions which are specific to the i386 platform.
175: *
176: * Using these techniques, this driver achieves very high performance
177: * by minimizing the amount of interrupts generated during large
178: * transfers and by completely avoiding buffer copies. Frame transfer
179: * to and from the ThunderLAN chip is performed entirely by the chip
180: * itself thereby reducing the load on the host CPU.
181: */
182:
183: #include "bpfilter.h"
184:
185: #include <sys/param.h>
186: #include <sys/systm.h>
187: #include <sys/sockio.h>
188: #include <sys/mbuf.h>
189: #include <sys/malloc.h>
190: #include <sys/kernel.h>
191: #include <sys/socket.h>
192: #include <sys/device.h>
193: #include <sys/timeout.h>
194:
195: #include <net/if.h>
196:
197: #ifdef INET
198: #include <netinet/in.h>
199: #include <netinet/in_systm.h>
200: #include <netinet/in_var.h>
201: #include <netinet/ip.h>
202: #include <netinet/if_ether.h>
203: #endif
204:
205: #include <net/if_dl.h>
206: #include <net/if_media.h>
207:
208: #if NBPFILTER > 0
209: #include <net/bpf.h>
210: #endif
211:
212: #include <uvm/uvm_extern.h> /* for vtophys */
213: #define VTOPHYS(v) vtophys((vaddr_t)(v))
214:
215: #include <dev/mii/mii.h>
216: #include <dev/mii/miivar.h>
217:
218: #include <dev/pci/pcireg.h>
219: #include <dev/pci/pcivar.h>
220: #include <dev/pci/pcidevs.h>
221:
222: /*
223: * Default to using PIO register access mode to pacify certain
224: * laptop docking stations with built-in ThunderLAN chips that
225: * don't seem to handle memory mapped mode properly.
226: */
227: #define TL_USEIOSPACE
228:
229: #include <dev/pci/if_tlreg.h>
230: #include <dev/mii/tlphyvar.h>
231:
/*
 * Table mapping supported PCI vendor/product IDs to the media
 * capability flags handed to the tlphy(4) driver at attach time.
 * The all-zero entry terminates the list.
 */
const struct tl_products tl_prods[] = {
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_N100TX, TLPHY_MEDIA_NO_10_T },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_N10T, TLPHY_MEDIA_10_5 },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_IntNF3P, TLPHY_MEDIA_10_2 },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_IntPL100TX, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_DPNet100TX, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_DP4000, TLPHY_MEDIA_10_5|TLPHY_MEDIA_NO_10_T },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_NF3P_BNC, TLPHY_MEDIA_10_2 },
	{ PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_NF3P, TLPHY_MEDIA_10_5 },
	{ PCI_VENDOR_TI, PCI_PRODUCT_TI_TLAN, 0 },
	{ 0, 0, 0 }
};
244:
/* Autoconf probe/attach and deferred link bring-up. */
int tl_probe(struct device *, void *, void *);
void tl_attach(struct device *, struct device *, void *);
void tl_wait_up(void *);
/* Interrupt sub-vector handlers, one per adapter event type. */
int tl_intvec_rxeoc(void *, u_int32_t);
int tl_intvec_txeoc(void *, u_int32_t);
int tl_intvec_txeof(void *, u_int32_t);
int tl_intvec_rxeof(void *, u_int32_t);
int tl_intvec_adchk(void *, u_int32_t);
int tl_intvec_netsts(void *, u_int32_t);

/* RX buffer replenishment, statistics, and TX encapsulation. */
int tl_newbuf(struct tl_softc *,
    struct tl_chain_onefrag *);
void tl_stats_update(void *);
int tl_encap(struct tl_softc *, struct tl_chain *,
    struct mbuf *);

/* ifnet entry points. */
int tl_intr(void *);
void tl_start(struct ifnet *);
int tl_ioctl(struct ifnet *, u_long, caddr_t);
void tl_init(void *);
void tl_stop(struct tl_softc *);
void tl_watchdog(struct ifnet *);
void tl_shutdown(void *);
int tl_ifmedia_upd(struct ifnet *);
void tl_ifmedia_sts(struct ifnet *, struct ifmediareq *);

/* Bit-banged serial EEPROM access. */
u_int8_t tl_eeprom_putbyte(struct tl_softc *, int);
u_int8_t tl_eeprom_getbyte(struct tl_softc *,
    int, u_int8_t *);
int tl_read_eeprom(struct tl_softc *, caddr_t, int, int);

/* Bit-banged MII management interface and miibus glue. */
void tl_mii_sync(struct tl_softc *);
void tl_mii_send(struct tl_softc *, u_int32_t, int);
int tl_mii_readreg(struct tl_softc *, struct tl_mii_frame *);
int tl_mii_writereg(struct tl_softc *, struct tl_mii_frame *);
int tl_miibus_readreg(struct device *, int, int);
void tl_miibus_writereg(struct device *, int, int, int);
void tl_miibus_statchg(struct device *);

/* Media, multicast filter, reset, and list initialization. */
void tl_setmode(struct tl_softc *, int);
#if 0
int tl_calchash(caddr_t);
#endif
void tl_setmulti(struct tl_softc *);
void tl_setfilt(struct tl_softc *, caddr_t, int);
void tl_softreset(struct tl_softc *, int);
void tl_hardreset(struct device *);
int tl_list_rx_init(struct tl_softc *);
int tl_list_tx_init(struct tl_softc *);

/* DIO window accessors for the chip's internal registers. */
u_int8_t tl_dio_read8(struct tl_softc *, int);
u_int16_t tl_dio_read16(struct tl_softc *, int);
u_int32_t tl_dio_read32(struct tl_softc *, int);
void tl_dio_write8(struct tl_softc *, int, int);
void tl_dio_write16(struct tl_softc *, int, int);
void tl_dio_write32(struct tl_softc *, int, int);
void tl_dio_setbit(struct tl_softc *, int, int);
void tl_dio_clrbit(struct tl_softc *, int, int);
void tl_dio_setbit16(struct tl_softc *, int, int);
void tl_dio_clrbit16(struct tl_softc *, int, int);
305:
306: u_int8_t tl_dio_read8(sc, reg)
307: struct tl_softc *sc;
308: int reg;
309: {
310: CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
311: return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)));
312: }
313:
314: u_int16_t tl_dio_read16(sc, reg)
315: struct tl_softc *sc;
316: int reg;
317: {
318: CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
319: return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)));
320: }
321:
322: u_int32_t tl_dio_read32(sc, reg)
323: struct tl_softc *sc;
324: int reg;
325: {
326: CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
327: return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3)));
328: }
329:
330: void tl_dio_write8(sc, reg, val)
331: struct tl_softc *sc;
332: int reg;
333: int val;
334: {
335: CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
336: CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val);
337: return;
338: }
339:
340: void tl_dio_write16(sc, reg, val)
341: struct tl_softc *sc;
342: int reg;
343: int val;
344: {
345: CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
346: CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val);
347: return;
348: }
349:
350: void tl_dio_write32(sc, reg, val)
351: struct tl_softc *sc;
352: int reg;
353: int val;
354: {
355: CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
356: CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val);
357: return;
358: }
359:
360: void tl_dio_setbit(sc, reg, bit)
361: struct tl_softc *sc;
362: int reg;
363: int bit;
364: {
365: u_int8_t f;
366:
367: CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
368: f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
369: f |= bit;
370: CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
371:
372: return;
373: }
374:
375: void tl_dio_clrbit(sc, reg, bit)
376: struct tl_softc *sc;
377: int reg;
378: int bit;
379: {
380: u_int8_t f;
381:
382: CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
383: f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3));
384: f &= ~bit;
385: CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f);
386:
387: return;
388: }
389:
390: void tl_dio_setbit16(sc, reg, bit)
391: struct tl_softc *sc;
392: int reg;
393: int bit;
394: {
395: u_int16_t f;
396:
397: CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
398: f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
399: f |= bit;
400: CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
401:
402: return;
403: }
404:
405: void tl_dio_clrbit16(sc, reg, bit)
406: struct tl_softc *sc;
407: int reg;
408: int bit;
409: {
410: u_int16_t f;
411:
412: CSR_WRITE_2(sc, TL_DIO_ADDR, reg);
413: f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3));
414: f &= ~bit;
415: CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f);
416:
417: return;
418: }
419:
420: /*
421: * Send an instruction or address to the EEPROM, check for ACK.
422: */
423: u_int8_t tl_eeprom_putbyte(sc, byte)
424: struct tl_softc *sc;
425: int byte;
426: {
427: int i, ack = 0;
428:
429: /*
430: * Make sure we're in TX mode.
431: */
432: tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN);
433:
434: /*
435: * Feed in each bit and strobe the clock.
436: */
437: for (i = 0x80; i; i >>= 1) {
438: if (byte & i) {
439: tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA);
440: } else {
441: tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA);
442: }
443: DELAY(1);
444: tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
445: DELAY(1);
446: tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
447: }
448:
449: /*
450: * Turn off TX mode.
451: */
452: tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
453:
454: /*
455: * Check for ack.
456: */
457: tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
458: ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA;
459: tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
460:
461: return(ack);
462: }
463:
464: /*
465: * Read a byte of data stored in the EEPROM at address 'addr.'
466: */
467: u_int8_t tl_eeprom_getbyte(sc, addr, dest)
468: struct tl_softc *sc;
469: int addr;
470: u_int8_t *dest;
471: {
472: int i;
473: u_int8_t byte = 0;
474:
475: tl_dio_write8(sc, TL_NETSIO, 0);
476:
477: EEPROM_START;
478:
479: /*
480: * Send write control code to EEPROM.
481: */
482: if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
483: printf("%s: failed to send write command, status: %x\n",
484: sc->sc_dev.dv_xname, tl_dio_read8(sc, TL_NETSIO));
485: return(1);
486: }
487:
488: /*
489: * Send address of byte we want to read.
490: */
491: if (tl_eeprom_putbyte(sc, addr)) {
492: printf("%s: failed to send address, status: %x\n",
493: sc->sc_dev.dv_xname, tl_dio_read8(sc, TL_NETSIO));
494: return(1);
495: }
496:
497: EEPROM_STOP;
498: EEPROM_START;
499: /*
500: * Send read control code to EEPROM.
501: */
502: if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
503: printf("%s: failed to send write command, status: %x\n",
504: sc->sc_dev.dv_xname, tl_dio_read8(sc, TL_NETSIO));
505: return(1);
506: }
507:
508: /*
509: * Start reading bits from EEPROM.
510: */
511: tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN);
512: for (i = 0x80; i; i >>= 1) {
513: tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK);
514: DELAY(1);
515: if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA)
516: byte |= i;
517: tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK);
518: DELAY(1);
519: }
520:
521: EEPROM_STOP;
522:
523: /*
524: * No ACK generated for read, so just return byte.
525: */
526:
527: *dest = byte;
528:
529: return(0);
530: }
531:
532: /*
533: * Read a sequence of bytes from the EEPROM.
534: */
535: int tl_read_eeprom(sc, dest, off, cnt)
536: struct tl_softc *sc;
537: caddr_t dest;
538: int off;
539: int cnt;
540: {
541: int err = 0, i;
542: u_int8_t byte = 0;
543:
544: for (i = 0; i < cnt; i++) {
545: err = tl_eeprom_getbyte(sc, off + i, &byte);
546: if (err)
547: break;
548: *(dest + i) = byte;
549: }
550:
551: return(err ? 1 : 0);
552: }
553:
554: void tl_mii_sync(sc)
555: struct tl_softc *sc;
556: {
557: int i;
558:
559: tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
560:
561: for (i = 0; i < 32; i++) {
562: tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
563: tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
564: }
565:
566: return;
567: }
568:
569: void tl_mii_send(sc, bits, cnt)
570: struct tl_softc *sc;
571: u_int32_t bits;
572: int cnt;
573: {
574: int i;
575:
576: for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
577: tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
578: if (bits & i) {
579: tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA);
580: } else {
581: tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA);
582: }
583: tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
584: }
585: }
586:
/*
 * Read a PHY register by bit-banging a read frame over the MII
 * management interface.  On success the result is left in
 * frame->mii_data and 0 is returned; 1 is returned if the addressed
 * PHY did not drive the acknowledge bit.  The whole transaction runs
 * at splnet() with the MII interrupt (MINTEN) masked.
 */
int tl_mii_readreg(sc, frame)
	struct tl_softc *sc;
	struct tl_mii_frame *frame;

{
	int i, ack, s;
	int minten = 0;

	s = splnet();

	tl_mii_sync(sc);

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = TL_MII_STARTDELIM;
	frame->mii_opcode = TL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Turn off MII interrupt by forcing MINTEN low; remember the
	 * previous state so it can be restored on exit.
	 */
	minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
	if (minten) {
		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
	}

	/*
	 * Turn on data xmit.
	 */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);

	/*
	 * Send command/address info.
	 */
	tl_mii_send(sc, frame->mii_stdelim, 2);
	tl_mii_send(sc, frame->mii_opcode, 2);
	tl_mii_send(sc, frame->mii_phyaddr, 5);
	tl_mii_send(sc, frame->mii_regaddr, 5);

	/*
	 * Turn off xmit so the PHY can drive the turnaround/data bits.
	 */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);

	/* Idle bit */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);

	/* Check for ack */
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
	ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA;

	/* Complete the cycle */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHYs in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
			tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
		if (!ack) {
			if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA)
				frame->mii_data |= i;
		}
		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
	}

fail:

	/* Trailing clock cycle to finish the frame. */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
	tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);

	/* Reenable interrupts */
	if (minten) {
		tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
	}

	splx(s);

	if (ack)
		return(1);
	return(0);
}
681:
682: int tl_mii_writereg(sc, frame)
683: struct tl_softc *sc;
684: struct tl_mii_frame *frame;
685:
686: {
687: int s;
688: int minten;
689:
690: tl_mii_sync(sc);
691:
692: s = splnet();
693: /*
694: * Set up frame for TX.
695: */
696:
697: frame->mii_stdelim = TL_MII_STARTDELIM;
698: frame->mii_opcode = TL_MII_WRITEOP;
699: frame->mii_turnaround = TL_MII_TURNAROUND;
700:
701: /*
702: * Turn off MII interrupt by forcing MINTEN low.
703: */
704: minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN;
705: if (minten) {
706: tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN);
707: }
708:
709: /*
710: * Turn on data output.
711: */
712: tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN);
713:
714: tl_mii_send(sc, frame->mii_stdelim, 2);
715: tl_mii_send(sc, frame->mii_opcode, 2);
716: tl_mii_send(sc, frame->mii_phyaddr, 5);
717: tl_mii_send(sc, frame->mii_regaddr, 5);
718: tl_mii_send(sc, frame->mii_turnaround, 2);
719: tl_mii_send(sc, frame->mii_data, 16);
720:
721: tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK);
722: tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK);
723:
724: /*
725: * Turn off xmit.
726: */
727: tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN);
728:
729: /* Reenable interrupts */
730: if (minten)
731: tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN);
732:
733: splx(s);
734:
735: return(0);
736: }
737:
738: int tl_miibus_readreg(dev, phy, reg)
739: struct device *dev;
740: int phy, reg;
741: {
742: struct tl_softc *sc = (struct tl_softc *)dev;
743: struct tl_mii_frame frame;
744:
745: bzero((char *)&frame, sizeof(frame));
746:
747: frame.mii_phyaddr = phy;
748: frame.mii_regaddr = reg;
749: tl_mii_readreg(sc, &frame);
750:
751: return(frame.mii_data);
752: }
753:
754: void tl_miibus_writereg(dev, phy, reg, data)
755: struct device *dev;
756: int phy, reg, data;
757: {
758: struct tl_softc *sc = (struct tl_softc *)dev;
759: struct tl_mii_frame frame;
760:
761: bzero((char *)&frame, sizeof(frame));
762:
763: frame.mii_phyaddr = phy;
764: frame.mii_regaddr = reg;
765: frame.mii_data = data;
766:
767: tl_mii_writereg(sc, &frame);
768: }
769:
770: void tl_miibus_statchg(dev)
771: struct device *dev;
772: {
773: struct tl_softc *sc = (struct tl_softc *)dev;
774:
775: if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX) {
776: tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
777: } else {
778: tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
779: }
780: }
781:
782: /*
783: * Set modes for bitrate devices.
784: */
785: void tl_setmode(sc, media)
786: struct tl_softc *sc;
787: int media;
788: {
789: if (IFM_SUBTYPE(media) == IFM_10_5)
790: tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
791: if (IFM_SUBTYPE(media) == IFM_10_T) {
792: tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
793: if ((media & IFM_GMASK) == IFM_FDX) {
794: tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
795: tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
796: } else {
797: tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
798: tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
799: }
800: }
801: }
802:
#if 0	/* currently compiled out; tl_setmulti() uses ALLMULTI instead */
/*
 * Calculate the hash of a MAC address for programming the multicast hash
 * table. This hash is simply the address split into 6-bit chunks
 * XOR'd, e.g.
 * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555
 * bit:  765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210
 * Bytes 0-2 and 3-5 are symmetrical, so are folded together. Then
 * the folded 24-bit value is split into 6-bit portions and XOR'd.
 */
int tl_calchash(addr)
	caddr_t addr;
{
	int t;

	t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 |
		(addr[2] ^ addr[5]);
	return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f;
}
#endif
823:
824: /*
825: * The ThunderLAN has a perfect MAC address filter in addition to
826: * the multicast hash filter. The perfect filter can be programmed
827: * with up to four MAC addresses. The first one is always used to
828: * hold the station address, which leaves us free to use the other
829: * three for multicast addresses.
830: */
831: void tl_setfilt(sc, addr, slot)
832: struct tl_softc *sc;
833: caddr_t addr;
834: int slot;
835: {
836: int i;
837: u_int16_t regaddr;
838:
839: regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN);
840:
841: for (i = 0; i < ETHER_ADDR_LEN; i++)
842: tl_dio_write8(sc, regaddr + i, *(addr + i));
843:
844: return;
845: }
846:
847: /*
848: * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly
849: * linked list. This is fine, except addresses are added from the head
850: * end of the list. We want to arrange for 224.0.0.1 (the "all hosts")
851: * group to always be in the perfect filter, but as more groups are added,
852: * the 224.0.0.1 entry (which is always added first) gets pushed down
853: * the list and ends up at the tail. So after 3 or 4 multicast groups
854: * are added, the all-hosts entry gets pushed out of the perfect filter
855: * and into the hash table.
856: *
857: * Because the multicast list is a doubly-linked list as opposed to a
858: * circular queue, we don't have the ability to just grab the tail of
859: * the list and traverse it backwards. Instead, we have to traverse
860: * the list once to find the tail, then traverse it again backwards to
861: * update the multicast filter.
862: */
/*
 * Program the multicast hash filter.  The currently-enabled code path
 * does not hash individual addresses at all: if any multicast group is
 * joined, the hash table is opened wide (all ones) and IFF_ALLMULTI is
 * set; otherwise the table is cleared.  The precise per-address hash
 * version is retained under #if 0 along with tl_calchash().
 */
void tl_setmulti(sc)
	struct tl_softc *sc;
{
	struct ifnet *ifp;
	u_int32_t hashes[2] = { 0, 0 };
	int h;
	struct arpcom *ac = &sc->arpcom;
	struct ether_multistep step;
	struct ether_multi *enm;
	ifp = &sc->arpcom.ac_if;

	/* Clear the hash table before reprogramming it. */
	tl_dio_write32(sc, TL_HASH1, 0);
	tl_dio_write32(sc, TL_HASH2, 0);

	ifp->if_flags &= ~IFF_ALLMULTI;
#if 0
	/* Per-address hashing; falls back to ALLMULTI on address ranges. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) == 0) {
			h = tl_calchash(enm->enm_addrlo);
			hashes[h/32] |= (1 << (h % 32));
		} else {
			hashes[0] = hashes[1] = 0xffffffff;
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
#else
	/* Count the multicast entries; any at all means ALLMULTI. */
	ETHER_FIRST_MULTI(step, ac, enm);
	h = 0;
	while (enm != NULL) {
		h++;
		ETHER_NEXT_MULTI(step, enm);
	}
	if (h) {
		hashes[0] = hashes[1] = 0xffffffff;
		ifp->if_flags |= IFF_ALLMULTI;
	} else {
		hashes[0] = hashes[1] = 0x00000000;
	}
#endif

	tl_dio_write32(sc, TL_HASH1, hashes[0]);
	tl_dio_write32(sc, TL_HASH2, hashes[1]);

	return;
}
911:
912: /*
913:  * This routine is recommended by the ThunderLAN manual to ensure that
914: * the internal PHY is powered up correctly. It also recommends a one
915: * second pause at the end to 'wait for the clocks to start' but in my
916: * experience this isn't necessary.
917: */
/*
 * Hard-reset sequence: isolate and power down every possible PHY
 * address, then isolate PHY 31 (the ThunderLAN's internal PHY address)
 * and wait for its reset bit to clear.
 *
 * NOTE(review): the while loop below polls BMCR_RESET with no timeout;
 * if the internal PHY never clears the bit this spins forever.  Also,
 * BMCR_RESET is polled even though only BMCR_ISO was written just
 * above — presumably relying on the chip's power-up reset; confirm
 * against the ThunderLAN manual before changing.
 */
void tl_hardreset(dev)
	struct device *dev;
{
	struct tl_softc *sc = (struct tl_softc *)dev;
	int i;
	u_int16_t flags;

	flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN;

	/* Park every PHY address in loopback/isolate/power-down. */
	for (i =0 ; i < MII_NPHY; i++)
		tl_miibus_writereg(dev, i, MII_BMCR, flags);

	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO);
	tl_mii_sync(sc);
	while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET);

	DELAY(5000);
	return;
}
937:
/*
 * Soft-reset the adapter: assert the reset command, clear statistics
 * and address/hash registers, program the Netconfig register for
 * one-channel/one-fragment operation, load the interrupt pacing timer
 * and TX threshold, and bring the MAC and MII back out of reset.
 * 'internal' selects whether the built-in 10baseT PHY is enabled
 * (ignored when the board uses a bitrate device).
 */
void tl_softreset(sc, internal)
	struct tl_softc *sc;
	int internal;
{
	u_int32_t cmd, dummy, i;

	/* Assert the adapter reset bit. */
	CMD_SET(sc, TL_CMD_ADRST);
	/* Turn off interrupts */
	CMD_SET(sc, TL_CMD_INTSOFF);

	/* First, clear the stats registers (reads are read-to-clear;
	 * the value itself is discarded). */
	for (i = 0; i < 5; i++)
		dummy = tl_dio_read32(sc, TL_TXGOODFRAMES);

	/* Clear Areg and Hash registers */
	for (i = 0; i < 8; i++)
		tl_dio_write32(sc, TL_AREG0_B5, 0x00000000);

	/*
	 * Set up Netconfig register. Enable one channel and
	 * one fragment mode.
	 */
	tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG);
	if (internal && !sc->tl_bitrate) {
		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
	} else {
		tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
	}

	/* Handle cards with bitrate devices. */
	if (sc->tl_bitrate)
		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE);

	/*
	 * Load adapter irq pacing timer and tx threshold.
	 * We make the transmit threshold 1 initially but we may
	 * change that later.
	 */
	cmd = CSR_READ_4(sc, TL_HOSTCMD);
	cmd |= TL_CMD_NES;
	cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK);
	CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR));
	CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003));

	/* Unreset the MII */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST);

	/* Take the adapter out of reset */
	tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP);

	/* Wait for things to settle down a little. */
	DELAY(500);

	return;
}
994:
995: /*
996: * Initialize the transmit lists.
997: */
998: int tl_list_tx_init(sc)
999: struct tl_softc *sc;
1000: {
1001: struct tl_chain_data *cd;
1002: struct tl_list_data *ld;
1003: int i;
1004:
1005: cd = &sc->tl_cdata;
1006: ld = sc->tl_ldata;
1007: for (i = 0; i < TL_TX_LIST_CNT; i++) {
1008: cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i];
1009: if (i == (TL_TX_LIST_CNT - 1))
1010: cd->tl_tx_chain[i].tl_next = NULL;
1011: else
1012: cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1];
1013: }
1014:
1015: cd->tl_tx_free = &cd->tl_tx_chain[0];
1016: cd->tl_tx_tail = cd->tl_tx_head = NULL;
1017: sc->tl_txeoc = 1;
1018:
1019: return(0);
1020: }
1021:
1022: /*
1023: * Initialize the RX lists and allocate mbufs for them.
1024: */
1025: int tl_list_rx_init(sc)
1026: struct tl_softc *sc;
1027: {
1028: struct tl_chain_data *cd;
1029: struct tl_list_data *ld;
1030: int i;
1031:
1032: cd = &sc->tl_cdata;
1033: ld = sc->tl_ldata;
1034:
1035: for (i = 0; i < TL_RX_LIST_CNT; i++) {
1036: cd->tl_rx_chain[i].tl_ptr =
1037: (struct tl_list_onefrag *)&ld->tl_rx_list[i];
1038: if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS)
1039: return(ENOBUFS);
1040: if (i == (TL_RX_LIST_CNT - 1)) {
1041: cd->tl_rx_chain[i].tl_next = NULL;
1042: ld->tl_rx_list[i].tlist_fptr = 0;
1043: } else {
1044: cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1];
1045: ld->tl_rx_list[i].tlist_fptr =
1046: VTOPHYS(&ld->tl_rx_list[i + 1]);
1047: }
1048: }
1049:
1050: cd->tl_rx_head = &cd->tl_rx_chain[0];
1051: cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
1052:
1053: return(0);
1054: }
1055:
/*
 * Attach a fresh mbuf cluster to the given RX chain slot and program
 * its one-fragment descriptor so the chip can DMA into it.
 * Returns ENOBUFS if either the mbuf header or the cluster cannot be
 * allocated (the header is freed in the latter case), 0 on success.
 */
int tl_newbuf(sc, c)
	struct tl_softc		*sc;
	struct tl_chain_onefrag	*c;
{
	struct mbuf		*m_new = NULL;

	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL) {
		return(ENOBUFS);
	}

	MCLGET(m_new, M_DONTWAIT);
	if (!(m_new->m_flags & M_EXT)) {
		m_freem(m_new);
		return(ENOBUFS);
	}

#ifdef __alpha__
	/*
	 * Offset the payload by 2 bytes (presumably to align the IP
	 * header after the 14-byte Ethernet header).  NOTE(review):
	 * tlist_dcnt below is still MCLBYTES, so the usable room is 2
	 * bytes short of that — confirm the chip honors tlist_frsize.
	 */
	m_new->m_data += 2;
#endif

	/* Descriptor: single fragment covering the whole cluster,
	 * no forward pointer, marked READY for the chip. */
	c->tl_mbuf = m_new;
	c->tl_next = NULL;
	c->tl_ptr->tlist_frsize = MCLBYTES;
	c->tl_ptr->tlist_fptr = 0;
	c->tl_ptr->tl_frag.tlist_dadr = VTOPHYS(mtod(m_new, caddr_t));
	c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;

	return(0);
}
1087: /*
1088: * Interrupt handler for RX 'end of frame' condition (EOF). This
1089: * tells us that a full ethernet frame has been captured and we need
1090: * to handle it.
1091: *
1092: * Reception is done using 'lists' which consist of a header and a
1093: * series of 10 data count/data address pairs that point to buffers.
1094: * Initially you're supposed to create a list, populate it with pointers
1095: * to buffers, then load the physical address of the list into the
1096: * ch_parm register. The adapter is then supposed to DMA the received
1097: * frame into the buffers for you.
1098: *
1099: * To make things as fast as possible, we have the chip DMA directly
1100: * into mbufs. This saves us from having to do a buffer copy: we can
1101: * just hand the mbufs directly to ether_input(). Once the frame has
1102: * been sent on its way, the 'list' structure is assigned a new buffer
 * and moved to the end of the RX chain. As long as we stay ahead of
1104: * the chip, it will always think it has an endless receive channel.
1105: *
1106: * If we happen to fall behind and the chip manages to fill up all of
1107: * the buffers, it will generate an end of channel interrupt and wait
1108: * for us to empty the chain and restart the receiver.
1109: */
/*
 * RX end-of-frame handler: drain completed frames from the head of the
 * RX ring, hand them to the stack, and recycle each descriptor to the
 * tail of the ring with a fresh mbuf.  Returns the number of frames
 * processed (used by the caller as the interrupt ACK count).
 */
int tl_intvec_rxeof(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r = 0, total_len = 0;
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct tl_chain_onefrag	*cur_rx;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	while(sc->tl_cdata.tl_rx_head != NULL) {
		cur_rx = sc->tl_cdata.tl_rx_head;
		/* Stop at the first descriptor the chip hasn't completed. */
		if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		r++;
		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
		m = cur_rx->tl_mbuf;
		total_len = cur_rx->tl_ptr->tlist_frsize;

		if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
			/*
			 * No replacement mbuf: drop the frame and re-arm
			 * the descriptor with its existing buffer.
			 */
			ifp->if_ierrors++;
			cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
			cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
			cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
			continue;
		}

		/* Move the recycled descriptor to the ring tail. */
		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
		    VTOPHYS(cur_rx->tl_ptr);
		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
		sc->tl_cdata.tl_rx_tail = cur_rx;

		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;

		/*
		 * Note: when the ThunderLAN chip is in 'capture all
		 * frames' mode, it will receive its own transmissions.
		 * We don't need to process our own transmissions,
		 * so we drop them here and continue.
		 */
		/*if (ifp->if_flags & IFF_PROMISC && */
		if (!bcmp(eh->ether_shost, sc->arpcom.ac_enaddr,
		    ETHER_ADDR_LEN)) {
			m_freem(m);
			continue;
		}

		m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
		/* Hand a copy of the frame to any BPF listeners. */
		if (ifp->if_bpf) {
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
		}
#endif
		/* pass it on. */
		ether_input_mbuf(ifp, m);
	}

	return(r);
}
1182:
1183: /*
1184: * The RX-EOC condition hits when the ch_parm address hasn't been
1185: * initialized or the adapter reached a list with a forward pointer
1186: * of 0 (which indicates the end of the chain). In our case, this means
1187: * the card has hit the end of the receive buffer chain and we need to
1188: * empty out the buffers and shift the pointer back to the beginning again.
1189: */
/*
 * RX end-of-channel handler: the chip ran off the end of the RX chain.
 * Drain whatever completed frames remain (acking the RXEOF count
 * inline), rewind the software head/tail to the start of the ring,
 * reload CH_PARM with the first descriptor, and tell the caller to
 * issue GO|RT (folded into the returned ACK value).
 */
int tl_intvec_rxeoc(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r;
	struct tl_chain_data	*cd;

	sc = xsc;
	cd = &sc->tl_cdata;

	/* Flush out the receive queue and ack RXEOF interrupts. */
	r = tl_intvec_rxeof(xsc, type);
	/* Ack here ourselves; the masked bit flips RXEOC to RXEOF. */
	CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
	r = 1;
	/* Reset the ring pointers back to the beginning. */
	cd->tl_rx_head = &cd->tl_rx_chain[0];
	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
	CSR_WRITE_4(sc, TL_CH_PARM, VTOPHYS(sc->tl_cdata.tl_rx_head->tl_ptr));
	/* Caller's ACK write will also restart the RX channel. */
	r |= (TL_CMD_GO|TL_CMD_RT);
	return(r);
}
1211:
/*
 * TX end-of-frame handler: reap completed descriptors from the head of
 * the TX queue, free their mbufs and return the slots to the free
 * list.  Returns the number of frames reaped (the caller's ACK count).
 */
int tl_intvec_txeof(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r = 0;
	struct tl_chain		*cur_tx;

	sc = xsc;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->tl_cdata.tl_tx_head != NULL) {
		cur_tx = sc->tl_cdata.tl_tx_head;
		/* Stop at the first frame the chip hasn't finished. */
		if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		sc->tl_cdata.tl_tx_head = cur_tx->tl_next;

		r++;
		m_freem(cur_tx->tl_mbuf);
		cur_tx->tl_mbuf = NULL;

		/* Return the slot to the free list. */
		cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx;
		/* A zero forward pointer marks the end of this chain. */
		if (!cur_tx->tl_ptr->tlist_fptr)
			break;
	}

	return(r);
}
1244:
1245: /*
1246: * The transmit end of channel interrupt. The adapter triggers this
1247: * interrupt to tell us it hit the end of the current transmit list.
1248: *
1249: * A note about this: it's possible for a condition to arise where
1250: * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
1251: * You have to avoid this since the chip expects things to go in a
1252: * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
1253: * When the TXEOF handler is called, it will free all of the transmitted
1254: * frames and reset the tx_head pointer to NULL. However, a TXEOC
1255: * interrupt should be received and acknowledged before any more frames
 * are queued for transmission. If tl_start() is called after TXEOF
1257: * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
1258: * it could attempt to issue a transmit command prematurely.
1259: *
1260: * To guard against this, tl_start() will only issue transmit commands
1261: * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
1262: * can set this flag once tl_start() has cleared it.
1263: */
/*
 * TX end-of-channel handler.  If the TX queue is fully drained, mark
 * the transmitter idle (tl_txeoc = 1) so tl_start() may issue the next
 * GO command, and let the caller ack the interrupt (return 1).  If
 * frames are still queued, ack the EOC ourselves, reload CH_PARM with
 * the next list and restart the TX channel (return 0 so the caller
 * does not ack again).
 */
int tl_intvec_txeoc(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		cmd;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	if (sc->tl_cdata.tl_tx_head == NULL) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->tl_cdata.tl_tx_tail = NULL;
		sc->tl_txeoc = 1;
	} else {
		sc->tl_txeoc = 0;
		/* First we have to ack the EOC interrupt. */
		CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type);
		/* Then load the address of the next TX list. */
		CSR_WRITE_4(sc, TL_CH_PARM,
		    VTOPHYS(sc->tl_cdata.tl_tx_head->tl_ptr));
		/* Restart TX channel. */
		cmd = CSR_READ_4(sc, TL_HOSTCMD);
		cmd &= ~TL_CMD_RT;
		cmd |= TL_CMD_GO|TL_CMD_INTSON;
		CMD_PUT(sc, cmd);
		return(0);
	}

	return(1);
}
1299:
/*
 * Adapter-check interrupt: the chip reported an internal fault.  Log
 * the failure code (read from CH_PARM), then fully reset and
 * reinitialize the adapter.  Returns 0 so the caller does not issue an
 * ACK (the reset path re-enables interrupts itself).
 */
int tl_intvec_adchk(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;

	sc = xsc;

	if (type)
		printf("%s: adapter check: %x\n", sc->sc_dev.dv_xname,
		    (unsigned int)CSR_READ_4(sc, TL_CH_PARM));

	/* Full recovery: soft reset, stop, then bring back up. */
	tl_softreset(sc, 1);
	tl_stop(sc);
	tl_init(sc);
	CMD_SET(sc, TL_CMD_INTSON);

	return(0);
}
1319:
1320: int tl_intvec_netsts(xsc, type)
1321: void *xsc;
1322: u_int32_t type;
1323: {
1324: struct tl_softc *sc;
1325: u_int16_t netsts;
1326:
1327: sc = xsc;
1328:
1329: netsts = tl_dio_read16(sc, TL_NETSTS);
1330: tl_dio_write16(sc, TL_NETSTS, netsts);
1331:
1332: printf("%s: network status: %x\n", sc->sc_dev.dv_xname, netsts);
1333:
1334: return(1);
1335: }
1336:
/*
 * Main interrupt handler.  Reads and latches TL_HOST_INT (the write
 * back disables further interrupts), decodes the interrupt type and
 * vector, dispatches to the matching handler, and re-enables/acks via
 * CMD_PUT using the handler's returned count.  Finally kicks tl_start()
 * if the send queue is non-empty.  Returns nonzero if the interrupt
 * was ours.
 */
int tl_intr(xsc)
	void			*xsc;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	int			r = 0;
	u_int32_t		type = 0;
	u_int16_t		ints = 0;
	u_int8_t		ivec = 0;

	/* Disable interrupts */
	sc = xsc;

	ints = CSR_READ_2(sc, TL_HOST_INT);
	CSR_WRITE_2(sc, TL_HOST_INT, ints);
	/* High half of the eventual ACK command word. */
	type = (ints << 16) & 0xFFFF0000;
	ivec = (ints & TL_VEC_MASK) >> 5;
	ints = (ints & TL_INT_MASK) >> 2;

	ifp = &sc->arpcom.ac_if;

	switch(ints) {
	case (TL_INTR_INVALID):
		/* Re-enable interrupts but don't ack this one. */
		CMD_PUT(sc, type);
		r = 0;
		break;
	case (TL_INTR_TXEOF):
		r = tl_intvec_txeof((void *)sc, type);
		break;
	case (TL_INTR_TXEOC):
		r = tl_intvec_txeoc((void *)sc, type);
		break;
	case (TL_INTR_STATOFLOW):
		tl_stats_update(sc);
		r = 1;
		break;
	case (TL_INTR_RXEOF):
		r = tl_intvec_rxeof((void *)sc, type);
		break;
	case (TL_INTR_DUMMY):
		printf("%s: got a dummy interrupt\n", sc->sc_dev.dv_xname);
		r = 1;
		break;
	case (TL_INTR_ADCHK):
		/* Vector distinguishes adapter check from net status. */
		if (ivec)
			r = tl_intvec_adchk((void *)sc, type);
		else
			r = tl_intvec_netsts((void *)sc, type);
		break;
	case (TL_INTR_RXEOC):
		r = tl_intvec_rxeoc((void *)sc, type);
		break;
	default:
		printf("%s: bogus interrupt type\n", sc->sc_dev.dv_xname);
		break;
	}

	/* Re-enable interrupts (r == 0 means the handler already acked). */
	if (r) {
		CMD_PUT(sc, TL_CMD_ACK | r | type);
	}

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		tl_start(ifp);

	return r;
}
1405:
/*
 * Periodic statistics harvester (also the stats-overflow handler).
 * Reads the five 32-bit statistics words via the auto-incrementing DIO
 * window, folds them into the ifnet counters, bumps the TX threshold
 * after an underrun, reschedules itself for one second later, and
 * ticks the MII state machine on PHY-equipped cards.
 */
void tl_stats_update(xsc)
	void			*xsc;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	struct tl_stats		tl_stats;
	u_int32_t		*p;
	int			s;

	s = splnet();

	bzero((char *)&tl_stats, sizeof(struct tl_stats));

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	p = (u_int32_t *)&tl_stats;

	/* Five reads through the DIO data port with address increment. */
	CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);

	ifp->if_opackets += tl_tx_goodframes(tl_stats);
	ifp->if_collisions += tl_stats.tl_tx_single_collision +
	    tl_stats.tl_tx_multi_collision;
	ifp->if_ipackets += tl_rx_goodframes(tl_stats);
	ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
	    tl_rx_overrun(tl_stats);
	ifp->if_oerrors += tl_tx_underrun(tl_stats);

	if (tl_tx_underrun(tl_stats)) {
		u_int8_t		tx_thresh;
		/*
		 * FIFO underran: raise the TX threshold one step unless
		 * it is already at store-and-forward (WHOLEPKT).
		 */
		tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH;
		if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) {
			tx_thresh >>= 4;
			tx_thresh++;
			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
			tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4);
		}
	}

	/* Re-arm for one second from now. */
	timeout_add(&sc->tl_stats_tmo, hz);

	if (!sc->tl_bitrate)
		mii_tick(&sc->sc_mii);

	splx(s);
	return;
}
1458:
1459: /*
1460: * Encapsulate an mbuf chain in a list by coupling the mbuf data
1461: * pointers to the fragment pointers.
1462: */
/*
 * Encapsulate an mbuf chain in a TX list by copying the mbuf data
 * pointers into the descriptor's fragment entries.  Two special cases:
 * a chain with more than TL_MAXFRAGS non-empty mbufs is coalesced into
 * a single new mbuf (the original chain is freed), and a frame shorter
 * than TL_MIN_FRAMELEN gets an extra padding fragment.  Returns 1 on
 * mbuf allocation failure (m_head untouched), 0 on success (c takes
 * ownership of the mbuf chain).
 */
int tl_encap(sc, c, m_head)
	struct tl_softc		*sc;
	struct tl_chain		*c;
	struct mbuf		*m_head;
{
	int			frag = 0;
	struct tl_frag		*f = NULL;
	int			total_len;
	struct mbuf		*m;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == TL_MAXFRAGS)
				break;
			total_len+= m->m_len;
			c->tl_ptr->tl_frag[frag].tlist_dadr =
			    VTOPHYS(mtod(m, vaddr_t));
			c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
			frag++;
		}
	}

	/*
	 * Handle special cases.
	 * Special case #1: we used up all 10 fragments, but
	 * we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf		*m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(1);
		}
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				return(1);
			}
		}
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
		    mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		/* The coalesced frame occupies fragment 0 only. */
		f = &c->tl_ptr->tl_frag[0];
		f->tlist_dadr = VTOPHYS(mtod(m_new, caddr_t));
		f->tlist_dcnt = total_len = m_new->m_len;
		frag = 1;
	}

	/*
	 * Special case #2: the frame is smaller than the minimum
	 * frame size. We have to pad it to make the chip happy.
	 */
	if (total_len < TL_MIN_FRAMELEN) {
		/* Point an extra fragment at the shared pad buffer. */
		f = &c->tl_ptr->tl_frag[frag];
		f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
		f->tlist_dadr = VTOPHYS(&sc->tl_ldata->tl_pad);
		total_len += f->tlist_dcnt;
		frag++;
	}

	/* Finalize the descriptor and mark it ready for the chip. */
	c->tl_mbuf = m_head;
	c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
	c->tl_ptr->tlist_frsize = total_len;
	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
	c->tl_ptr->tlist_fptr = 0;

	return(0);
}
1547:
1548: /*
1549: * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1550: * to the mbuf data regions directly in the transmit lists. We also save a
1551: * copy of the pointers since the transmit list fragment pointers are
1552: * physical addresses.
1553: */
/*
 * Main transmit routine.  Dequeues packets, encapsulates each into a
 * free TX descriptor, chains them together, and — if the transmitter
 * is idle AND the TXEOC handshake allows it (sc->tl_txeoc) — issues
 * the TX GO command.  Otherwise the queued chain is picked up by the
 * TXEOF/TXEOC interrupt handlers.
 */
void tl_start(ifp)
	struct ifnet		*ifp;
{
	struct tl_softc		*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		cmd;
	struct tl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->tl_cdata.tl_tx_free == NULL) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	start_tx = sc->tl_cdata.tl_tx_free;

	while(sc->tl_cdata.tl_tx_free != NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a chain member off the free list. */
		cur_tx = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx->tl_next;

		cur_tx->tl_next = NULL;

		/*
		 * Pack the data into the list.
		 * NOTE(review): tl_encap() can return 1 on mbuf
		 * allocation failure, which is ignored here — the
		 * descriptor would then be queued with stale fragment
		 * data.  Confirm whether this needs handling.
		 */
		tl_encap(sc, cur_tx, m_head);

		/* Chain it together */
		if (prev != NULL) {
			prev->tl_next = cur_tx;
			prev->tl_ptr->tlist_fptr = VTOPHYS(cur_tx->tl_ptr);
		}
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->tl_mbuf,
			    BPF_DIRECTION_OUT);
#endif
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * That's all we can stands, we can't stands no more.
	 * If there are no other transfers pending, then issue the
	 * TX GO command to the adapter to start things moving.
	 * Otherwise, just leave the data in the queue and let
	 * the EOF/EOC interrupt handler send.
	 */
	if (sc->tl_cdata.tl_tx_head == NULL) {
		sc->tl_cdata.tl_tx_head = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;

		if (sc->tl_txeoc) {
			sc->tl_txeoc = 0;
			CSR_WRITE_4(sc, TL_CH_PARM, VTOPHYS(start_tx->tl_ptr));
			cmd = CSR_READ_4(sc, TL_HOSTCMD);
			cmd &= ~TL_CMD_RT;
			cmd |= TL_CMD_GO|TL_CMD_INTSON;
			CMD_PUT(sc, cmd);
		}
	} else {
		/* Transmitter busy: append to the pending chain. */
		sc->tl_cdata.tl_tx_tail->tl_next = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;
	}

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 10;

	return;
}
1644:
/*
 * Bring the interface up: stop any pending I/O, program thresholds,
 * burst size, filter modes and station address, rebuild the RX/TX
 * lists, select media, and start the receiver.  Also (re)arms the
 * stats and wait-up timeouts.
 * NOTE(review): IFF_RUNNING is not set here — presumably tl_wait_up()
 * does that; confirm against the rest of the file.
 */
void tl_init(xsc)
	void			*xsc;
{
	struct tl_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	int			s;

	s = splnet();

	/*
	 * Cancel pending I/O.
	 */
	tl_stop(sc);

	/* Initialize TX FIFO threshold */
	tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
	tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG);

	/* Set PCI burst size */
	tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG);

	/*
	 * Set 'capture all frames' bit for promiscuous mode.
	 */
	if (ifp->if_flags & IFF_PROMISC)
		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
	else
		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX);
	else
		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX);

	/* Largest frame the chip will DMA for us. */
	tl_dio_write16(sc, TL_MAXRX, MCLBYTES);

	/* Init our MAC address */
	tl_setfilt(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0);

	/* Init multicast filter, if needed. */
	tl_setmulti(sc);

	/* Init circular RX list. */
	if (tl_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		tl_stop(sc);
		splx(s);
		return;
	}

	/* Init TX pointers. */
	tl_list_tx_init(sc);

	/* Enable PCI interrupts. */
	CMD_SET(sc, TL_CMD_INTSON);

	/* Load the address of the rx list */
	CMD_SET(sc, TL_CMD_RT);
	CSR_WRITE_4(sc, TL_CH_PARM, VTOPHYS(&sc->tl_ldata->tl_rx_list[0]));

	if (!sc->tl_bitrate) {
		mii_mediachg(&sc->sc_mii);
	} else {
		tl_ifmedia_upd(ifp);
	}

	/* Send the RX go command */
	CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT);

	splx(s);

	/* Start the stats update counter */
	timeout_set(&sc->tl_stats_tmo, tl_stats_update, sc);
	timeout_add(&sc->tl_stats_tmo, hz);
	timeout_set(&sc->tl_wait_tmo, tl_wait_up, sc);
	timeout_add(&sc->tl_wait_tmo, 2 * hz);

	return;
}
1728:
1729: /*
1730: * Set media options.
1731: */
1732: int
1733: tl_ifmedia_upd(ifp)
1734: struct ifnet *ifp;
1735: {
1736: struct tl_softc *sc = ifp->if_softc;
1737:
1738: if (sc->tl_bitrate)
1739: tl_setmode(sc, sc->ifmedia.ifm_media);
1740: else
1741: mii_mediachg(&sc->sc_mii);
1742:
1743: return(0);
1744: }
1745:
1746: /*
1747: * Report current media status.
1748: */
1749: void tl_ifmedia_sts(ifp, ifmr)
1750: struct ifnet *ifp;
1751: struct ifmediareq *ifmr;
1752: {
1753: struct tl_softc *sc;
1754: struct mii_data *mii;
1755:
1756: sc = ifp->if_softc;
1757: mii = &sc->sc_mii;
1758:
1759: ifmr->ifm_active = IFM_ETHER;
1760: if (sc->tl_bitrate) {
1761: if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1)
1762: ifmr->ifm_active = IFM_ETHER|IFM_10_5;
1763: else
1764: ifmr->ifm_active = IFM_ETHER|IFM_10_T;
1765: if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3)
1766: ifmr->ifm_active |= IFM_HDX;
1767: else
1768: ifmr->ifm_active |= IFM_FDX;
1769: return;
1770: } else {
1771: mii_pollstat(mii);
1772: ifmr->ifm_active = mii->mii_media_active;
1773: ifmr->ifm_status = mii->mii_media_status;
1774: }
1775:
1776: return;
1777: }
1778:
/*
 * ioctl entry point.  Runs at splnet.  Defers to ether_ioctl() first;
 * then handles address assignment, up/down and promiscuous-mode
 * transitions (avoiding a full reinit when only CAF needs toggling),
 * multicast filter updates, and media ioctls.  Returns 0 or errno.
 */
int tl_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct tl_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct ifaddr		*ifa = (struct ifaddr *)data;
	int			s, error = 0;

	s = splnet();

	/* Common Ethernet ioctls; > 0 means handled (possibly an error). */
	if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
		splx(s);
		return error;
	}

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			tl_init(sc);
			arp_ifinit(&sc->arpcom, ifa);
			break;
#endif /* INET */
		default:
			tl_init(sc);
			break;
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Only toggle the CAF (capture-all-frames) bit
			 * when just the PROMISC flag changed; anything
			 * else gets a full reinit.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->tl_if_flags & IFF_PROMISC)) {
				tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
				tl_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->tl_if_flags & IFF_PROMISC) {
				tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
				tl_setmulti(sc);
			} else
				tl_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				tl_stop(sc);
			}
		}
		/* Remember flags for the next transition check. */
		sc->tl_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->arpcom) :
		    ether_delmulti(ifr, &sc->arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			tl_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->tl_bitrate)
			error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		else
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->sc_mii.mii_media, command);
		break;
	default:
		error = ENOTTY;
		break;
	}

	splx(s);

	return(error);
}
1865:
1866: void tl_watchdog(ifp)
1867: struct ifnet *ifp;
1868: {
1869: struct tl_softc *sc;
1870:
1871: sc = ifp->if_softc;
1872:
1873: printf("%s: device timeout\n", sc->sc_dev.dv_xname);
1874:
1875: ifp->if_oerrors++;
1876:
1877: tl_softreset(sc, 1);
1878: tl_init(sc);
1879:
1880: return;
1881: }
1882:
1883: /*
1884: * Stop the adapter and free any mbufs allocated to the
1885: * RX and TX lists.
1886: */
/*
 * Stop the adapter and free any mbufs allocated to the RX and TX
 * lists.  Cancels the periodic timeouts, halts both DMA channels,
 * masks interrupts, and clears the descriptor arrays.  Leaves the
 * interface marked down (!IFF_RUNNING, !IFF_OACTIVE).
 */
void tl_stop(sc)
	struct tl_softc		*sc;
{
	int			i;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	/* Stop the stats updater. */
	timeout_del(&sc->tl_stats_tmo);
	timeout_del(&sc->tl_wait_tmo);

	/* Stop the transmitter */
	CMD_CLR(sc, TL_CMD_RT);
	CMD_SET(sc, TL_CMD_STOP);
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/* Stop the receiver */
	CMD_SET(sc, TL_CMD_RT);
	CMD_SET(sc, TL_CMD_STOP);
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/*
	 * Disable host interrupts.
	 */
	CMD_SET(sc, TL_CMD_INTSOFF);

	/*
	 * Clear list pointer.
	 */
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/*
	 * Free the RX lists.
	 */
	for (i = 0; i < TL_RX_LIST_CNT; i++) {
		if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) {
			m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf);
			sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL;
		}
	}
	bzero((char *)&sc->tl_ldata->tl_rx_list,
	    sizeof(sc->tl_ldata->tl_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < TL_TX_LIST_CNT; i++) {
		if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) {
			m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf);
			sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL;
		}
	}
	bzero((char *)&sc->tl_ldata->tl_tx_list,
	    sizeof(sc->tl_ldata->tl_tx_list));

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	return;
}
1947:
1948: int
1949: tl_probe(parent, match, aux)
1950: struct device *parent;
1951: void *match;
1952: void *aux;
1953: {
1954: struct pci_attach_args *pa = (struct pci_attach_args *) aux;
1955:
1956: if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_TI) {
1957: if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_TI_TLAN)
1958: return 1;
1959: return 0;
1960: }
1961:
1962: if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_COMPAQ) {
1963: switch (PCI_PRODUCT(pa->pa_id)) {
1964: case PCI_PRODUCT_COMPAQ_N100TX:
1965: case PCI_PRODUCT_COMPAQ_N10T:
1966: case PCI_PRODUCT_COMPAQ_IntNF3P:
1967: case PCI_PRODUCT_COMPAQ_DPNet100TX:
1968: case PCI_PRODUCT_COMPAQ_IntPL100TX:
1969: case PCI_PRODUCT_COMPAQ_DP4000:
1970: case PCI_PRODUCT_COMPAQ_N10T2:
1971: case PCI_PRODUCT_COMPAQ_N10_TX_UTP:
1972: case PCI_PRODUCT_COMPAQ_NF3P:
1973: case PCI_PRODUCT_COMPAQ_NF3P_BNC:
1974: return 1;
1975: }
1976: return 0;
1977: }
1978:
1979: if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OLICOM) {
1980: switch (PCI_PRODUCT(pa->pa_id)) {
1981: case PCI_PRODUCT_OLICOM_OC2183:
1982: case PCI_PRODUCT_OLICOM_OC2325:
1983: case PCI_PRODUCT_OLICOM_OC2326:
1984: return 1;
1985: }
1986: return 0;
1987: }
1988:
1989: return 0;
1990: }
1991:
1992: void
1993: tl_attach(parent, self, aux)
1994: struct device *parent, *self;
1995: void *aux;
1996: {
1997: struct tl_softc *sc = (struct tl_softc *)self;
1998: struct pci_attach_args *pa = aux;
1999: pci_chipset_tag_t pc = pa->pa_pc;
2000: pci_intr_handle_t ih;
2001: const char *intrstr = NULL;
2002: struct ifnet *ifp = &sc->arpcom.ac_if;
2003: bus_size_t iosize;
2004: u_int32_t command;
2005: int i, rseg;
2006: bus_dma_segment_t seg;
2007: bus_dmamap_t dmamap;
2008: caddr_t kva;
2009:
2010: /*
2011: * Map control/status registers.
2012: */
2013:
2014: #ifdef TL_USEIOSPACE
2015: if (pci_mapreg_map(pa, TL_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
2016: &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)) {
2017: if (pci_mapreg_map(pa, TL_PCI_LOMEM, PCI_MAPREG_TYPE_IO, 0,
2018: &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)) {
2019: printf(": failed to map i/o space\n");
2020: return;
2021: }
2022: }
2023: #else
2024: if (pci_mapreg_map(pa, TL_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
2025: &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)){
2026: if (pci_mapreg_map(pa, TL_PCI_LOIO, PCI_MAPREG_TYPE_MEM, 0,
2027: &sc->tl_btag, &sc->tl_bhandle, NULL, &iosize, 0)){
2028: printf(": failed to map memory space\n");
2029: return;
2030: }
2031: }
2032: #endif
2033:
2034: /*
2035: * Manual wants the PCI latency timer jacked up to 0xff
2036: */
2037: command = pci_conf_read(pa->pa_pc, pa->pa_tag, TL_PCI_LATENCY_TIMER);
2038: command |= 0x0000ff00;
2039: pci_conf_write(pa->pa_pc, pa->pa_tag, TL_PCI_LATENCY_TIMER, command);
2040:
2041: /*
2042: * Allocate our interrupt.
2043: */
2044: if (pci_intr_map(pa, &ih)) {
2045: printf(": couldn't map interrupt\n");
2046: bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize);
2047: return;
2048: }
2049: intrstr = pci_intr_string(pc, ih);
2050: sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, tl_intr, sc,
2051: self->dv_xname);
2052: if (sc->sc_ih == NULL) {
2053: printf(": could not establish interrupt");
2054: if (intrstr != NULL)
2055: printf(" at %s", intrstr);
2056: printf("\n");
2057: bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize);
2058: return;
2059: }
2060: printf(": %s", intrstr);
2061:
2062: sc->sc_dmat = pa->pa_dmat;
2063: if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct tl_list_data),
2064: PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
2065: printf("%s: can't alloc list\n", sc->sc_dev.dv_xname);
2066: bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize);
2067: return;
2068: }
2069: if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(struct tl_list_data),
2070: &kva, BUS_DMA_NOWAIT)) {
2071: printf("%s: can't map dma buffers (%d bytes)\n",
2072: sc->sc_dev.dv_xname, sizeof(struct tl_list_data));
2073: bus_dmamem_free(sc->sc_dmat, &seg, rseg);
2074: return;
2075: }
2076: if (bus_dmamap_create(sc->sc_dmat, sizeof(struct tl_list_data), 1,
2077: sizeof(struct tl_list_data), 0, BUS_DMA_NOWAIT, &dmamap)) {
2078: printf("%s: can't create dma map\n", sc->sc_dev.dv_xname);
2079: bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct tl_list_data));
2080: bus_dmamem_free(sc->sc_dmat, &seg, rseg);
2081: bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize);
2082: return;
2083: }
2084: if (bus_dmamap_load(sc->sc_dmat, dmamap, kva,
2085: sizeof(struct tl_list_data), NULL, BUS_DMA_NOWAIT)) {
2086: printf("%s: can't load dma map\n", sc->sc_dev.dv_xname);
2087: bus_dmamap_destroy(sc->sc_dmat, dmamap);
2088: bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct tl_list_data));
2089: bus_dmamem_free(sc->sc_dmat, &seg, rseg);
2090: bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize);
2091: return;
2092: }
2093: sc->tl_ldata = (struct tl_list_data *)kva;
2094: bzero(sc->tl_ldata, sizeof(struct tl_list_data));
2095:
2096: for (sc->tl_product = tl_prods; sc->tl_product->tp_vend;
2097: sc->tl_product++) {
2098: if (sc->tl_product->tp_vend == PCI_VENDOR(pa->pa_id) &&
2099: sc->tl_product->tp_prod == PCI_PRODUCT(pa->pa_id))
2100: break;
2101: }
2102:
2103: if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_COMPAQ ||
2104: PCI_VENDOR(pa->pa_id) == PCI_VENDOR_TI)
2105: sc->tl_eeaddr = TL_EEPROM_EADDR;
2106: if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OLICOM)
2107: sc->tl_eeaddr = TL_EEPROM_EADDR_OC;
2108:
2109: /*
2110: * Reset adapter.
2111: */
2112: tl_softreset(sc, 1);
2113: tl_hardreset(self);
2114: DELAY(1000000);
2115: tl_softreset(sc, 1);
2116:
2117: /*
2118: * Get station address from the EEPROM.
2119: */
2120: if (tl_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
2121: sc->tl_eeaddr, ETHER_ADDR_LEN)) {
2122: printf("\n%s: failed to read station address\n",
2123: sc->sc_dev.dv_xname);
2124: bus_space_unmap(sc->tl_btag, sc->tl_bhandle, iosize);
2125: return;
2126: }
2127:
2128: if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_OLICOM) {
2129: for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
2130: u_int16_t *p;
2131:
2132: p = (u_int16_t *)&sc->arpcom.ac_enaddr[i];
2133: *p = ntohs(*p);
2134: }
2135: }
2136:
2137: printf(" address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
2138:
2139: ifp = &sc->arpcom.ac_if;
2140: ifp->if_softc = sc;
2141: ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2142: ifp->if_ioctl = tl_ioctl;
2143: ifp->if_start = tl_start;
2144: ifp->if_watchdog = tl_watchdog;
2145: ifp->if_baudrate = 10000000;
2146: IFQ_SET_MAXLEN(&ifp->if_snd, TL_TX_LIST_CNT - 1);
2147: IFQ_SET_READY(&ifp->if_snd);
2148: bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
2149:
2150: /*
2151: * Reset adapter (again).
2152: */
2153: tl_softreset(sc, 1);
2154: tl_hardreset(self);
2155: DELAY(1000000);
2156: tl_softreset(sc, 1);
2157:
2158: /*
2159: * Do MII setup. If no PHYs are found, then this is a
2160: * bitrate ThunderLAN chip that only supports 10baseT
2161: * and AUI/BNC.
2162: */
2163: sc->sc_mii.mii_ifp = ifp;
2164: sc->sc_mii.mii_readreg = tl_miibus_readreg;
2165: sc->sc_mii.mii_writereg = tl_miibus_writereg;
2166: sc->sc_mii.mii_statchg = tl_miibus_statchg;
2167: ifmedia_init(&sc->sc_mii.mii_media, 0, tl_ifmedia_upd, tl_ifmedia_sts);
2168: mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
2169: 0);
2170: if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2171: struct ifmedia *ifm;
2172: sc->tl_bitrate = 1;
2173: ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts);
2174: ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
2175: ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
2176: ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
2177: ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
2178: ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T);
2179: /* Reset again, this time setting bitrate mode. */
2180: tl_softreset(sc, 1);
2181: ifm = &sc->ifmedia;
2182: ifm->ifm_media = ifm->ifm_cur->ifm_media;
2183: tl_ifmedia_upd(ifp);
2184: } else
2185: ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2186:
2187: /*
2188: * Attach us everywhere.
2189: */
2190: if_attach(ifp);
2191: ether_ifattach(ifp);
2192:
2193: shutdownhook_establish(tl_shutdown, sc);
2194: }
2195:
2196: void
2197: tl_wait_up(xsc)
2198: void *xsc;
2199: {
2200: struct tl_softc *sc = xsc;
2201: struct ifnet *ifp = &sc->arpcom.ac_if;
2202:
2203: ifp->if_flags |= IFF_RUNNING;
2204: ifp->if_flags &= ~IFF_OACTIVE;
2205: }
2206:
/*
 * Shutdown hook (registered in attach): halt the chip before the
 * system reboots or powers down.
 */
void
tl_shutdown(void *xsc)
{
	tl_stop((struct tl_softc *)xsc);
}
2215:
/*
 * Autoconf attachment glue: softc allocation size plus the
 * match (tl_probe) and attach (tl_attach) entry points.
 */
struct cfattach tl_ca = {
	sizeof(struct tl_softc), tl_probe, tl_attach
};
2219:
/*
 * Driver definition: device list (filled in by autoconf), driver
 * name "tl", and device class DV_IFNET (network interface).
 */
struct cfdriver tl_cd = {
	0, "tl", DV_IFNET
};
CVSweb