Annotation of sys/dev/pci/if_vr.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: if_vr.c,v 1.67 2006/11/03 23:45:26 brad Exp $ */
2:
3: /*
4: * Copyright (c) 1997, 1998
5: * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
6: *
7: * Redistribution and use in source and binary forms, with or without
8: * modification, are permitted provided that the following conditions
9: * are met:
10: * 1. Redistributions of source code must retain the above copyright
11: * notice, this list of conditions and the following disclaimer.
12: * 2. Redistributions in binary form must reproduce the above copyright
13: * notice, this list of conditions and the following disclaimer in the
14: * documentation and/or other materials provided with the distribution.
15: * 3. All advertising materials mentioning features or use of this software
16: * must display the following acknowledgement:
17: * This product includes software developed by Bill Paul.
18: * 4. Neither the name of the author nor the names of any co-contributors
19: * may be used to endorse or promote products derived from this software
20: * without specific prior written permission.
21: *
22: * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25: * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32: * THE POSSIBILITY OF SUCH DAMAGE.
33: *
34: * $FreeBSD: src/sys/pci/if_vr.c,v 1.73 2003/08/22 07:13:22 imp Exp $
35: */
36:
37: /*
38: * VIA Rhine fast ethernet PCI NIC driver
39: *
40: * Supports various network adapters based on the VIA Rhine
41: * and Rhine II PCI controllers, including the D-Link DFE530TX.
42: * Datasheets are available at http://www.via.com.tw.
43: *
44: * Written by Bill Paul <wpaul@ctr.columbia.edu>
45: * Electrical Engineering Department
46: * Columbia University, New York City
47: */
48:
49: /*
50: * The VIA Rhine controllers are similar in some respects to the
51: * the DEC tulip chips, except less complicated. The controller
52: * uses an MII bus and an external physical layer interface. The
53: * receiver has a one entry perfect filter and a 64-bit hash table
54: * multicast filter. Transmit and receive descriptors are similar
55: * to the tulip.
56: *
57: * The Rhine has a serious flaw in its transmit DMA mechanism:
58: * transmit buffers must be longword aligned. Unfortunately,
59: * FreeBSD doesn't guarantee that mbufs will be filled in starting
60: * at longword boundaries, so we have to do a buffer copy before
61: * transmission.
62: */
63:
64: #include "bpfilter.h"
65:
66: #include <sys/param.h>
67: #include <sys/systm.h>
68: #include <sys/sockio.h>
69: #include <sys/mbuf.h>
70: #include <sys/malloc.h>
71: #include <sys/kernel.h>
72: #include <sys/timeout.h>
73: #include <sys/socket.h>
74:
75: #include <net/if.h>
76: #include <sys/device.h>
77: #ifdef INET
78: #include <netinet/in.h>
79: #include <netinet/in_systm.h>
80: #include <netinet/in_var.h>
81: #include <netinet/ip.h>
82: #include <netinet/if_ether.h>
83: #endif /* INET */
84: #include <net/if_dl.h>
85: #include <net/if_media.h>
86:
87: #if NBPFILTER > 0
88: #include <net/bpf.h>
89: #endif
90:
91: #include <machine/bus.h>
92:
93: #include <dev/mii/mii.h>
94: #include <dev/mii/miivar.h>
95:
96: #include <dev/pci/pcireg.h>
97: #include <dev/pci/pcivar.h>
98: #include <dev/pci/pcidevs.h>
99:
100: #define VR_USEIOSPACE
101: #undef VR_USESWSHIFT
102:
103: #include <dev/pci/if_vrreg.h>
104:
105: int vr_probe(struct device *, void *, void *);
106: void vr_attach(struct device *, struct device *, void *);
107:
/* Autoconf glue: probe/attach entry points and per-instance softc size. */
108: struct cfattach vr_ca = {
109: sizeof(struct vr_softc), vr_probe, vr_attach
110: };
/* Driver class registration: devices named "vr", class network interface. */
111: struct cfdriver vr_cd = {
112: 0, "vr", DV_IFNET
113: };
114:
115: int vr_encap(struct vr_softc *, struct vr_chain *, struct mbuf *);
116: void vr_rxeof(struct vr_softc *);
117: void vr_rxeoc(struct vr_softc *);
118: void vr_txeof(struct vr_softc *);
119: void vr_tick(void *);
120: int vr_intr(void *);
121: void vr_start(struct ifnet *);
122: int vr_ioctl(struct ifnet *, u_long, caddr_t);
123: void vr_init(void *);
124: void vr_stop(struct vr_softc *);
125: void vr_watchdog(struct ifnet *);
126: void vr_shutdown(void *);
127: int vr_ifmedia_upd(struct ifnet *);
128: void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);
129:
130: void vr_mii_sync(struct vr_softc *);
131: void vr_mii_send(struct vr_softc *, u_int32_t, int);
132: int vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *);
133: int vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *);
134: int vr_miibus_readreg(struct device *, int, int);
135: void vr_miibus_writereg(struct device *, int, int, int);
136: void vr_miibus_statchg(struct device *);
137:
138: void vr_setcfg(struct vr_softc *, int);
139: void vr_setmulti(struct vr_softc *);
140: void vr_reset(struct vr_softc *);
141: int vr_list_rx_init(struct vr_softc *);
142: int vr_list_tx_init(struct vr_softc *);
143:
/*
 * PCI vendor/product pairs of the supported Rhine variants,
 * consulted by vr_probe() via pci_matchbyid().
 */
144: const struct pci_matchid vr_devices[] = {
145: { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINE },
146: { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII },
147: { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII_2 },
148: { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105 },
149: { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105M },
150: { PCI_VENDOR_DELTA, PCI_PRODUCT_DELTA_RHINEII },
151: { PCI_VENDOR_ADDTRON, PCI_PRODUCT_ADDTRON_RHINEII }
152: };
153:
154: #define VR_SETBIT(sc, reg, x) \
155: CSR_WRITE_1(sc, reg, \
156: CSR_READ_1(sc, reg) | (x))
157:
158: #define VR_CLRBIT(sc, reg, x) \
159: CSR_WRITE_1(sc, reg, \
160: CSR_READ_1(sc, reg) & ~(x))
161:
162: #define VR_SETBIT16(sc, reg, x) \
163: CSR_WRITE_2(sc, reg, \
164: CSR_READ_2(sc, reg) | (x))
165:
166: #define VR_CLRBIT16(sc, reg, x) \
167: CSR_WRITE_2(sc, reg, \
168: CSR_READ_2(sc, reg) & ~(x))
169:
170: #define VR_SETBIT32(sc, reg, x) \
171: CSR_WRITE_4(sc, reg, \
172: CSR_READ_4(sc, reg) | (x))
173:
174: #define VR_CLRBIT32(sc, reg, x) \
175: CSR_WRITE_4(sc, reg, \
176: CSR_READ_4(sc, reg) & ~(x))
177:
178: #define SIO_SET(x) \
179: CSR_WRITE_1(sc, VR_MIICMD, \
180: CSR_READ_1(sc, VR_MIICMD) | (x))
181:
182: #define SIO_CLR(x) \
183: CSR_WRITE_1(sc, VR_MIICMD, \
184: CSR_READ_1(sc, VR_MIICMD) & ~(x))
185:
186: #ifdef VR_USESWSHIFT
187: /*
188: * Sync the PHYs by setting data bit and strobing the clock 32 times.
189: */
190: void
191: vr_mii_sync(struct vr_softc *sc)
192: {
193: int i;
194:
195: SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);
196:
197: for (i = 0; i < 32; i++) {
198: SIO_SET(VR_MIICMD_CLK);
199: DELAY(1);
200: SIO_CLR(VR_MIICMD_CLK);
201: DELAY(1);
202: }
203: }
204:
205: /*
206: * Clock a series of bits through the MII.
207: */
208: void
209: vr_mii_send(struct vr_softc *sc, u_int32_t bits, int cnt)
210: {
211: int i;
212:
213: SIO_CLR(VR_MIICMD_CLK);
214:
215: for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
216: if (bits & i) {
217: SIO_SET(VR_MIICMD_DATAIN);
218: } else {
219: SIO_CLR(VR_MIICMD_DATAIN);
220: }
221: DELAY(1);
222: SIO_CLR(VR_MIICMD_CLK);
223: DELAY(1);
224: SIO_SET(VR_MIICMD_CLK);
225: }
226: }
227: #endif
228:
229: /*
230: * Read an PHY register through the MII.
231: */
232: int
233: vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame)
234: #ifdef VR_USESWSHIFT
235: {
236: int i, ack, s;
237:
238: s = splnet();
239:
240: /*
241: * Set up frame for RX.
242: */
243: frame->mii_stdelim = VR_MII_STARTDELIM;
244: frame->mii_opcode = VR_MII_READOP;
245: frame->mii_turnaround = 0;
246: frame->mii_data = 0;
247:
248: CSR_WRITE_1(sc, VR_MIICMD, 0);
249: VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
250:
251: /*
252: * Turn on data xmit.
253: */
254: SIO_SET(VR_MIICMD_DIR);
255:
256: vr_mii_sync(sc);
257:
258: /*
259: * Send command/address info.
260: */
261: vr_mii_send(sc, frame->mii_stdelim, 2);
262: vr_mii_send(sc, frame->mii_opcode, 2);
263: vr_mii_send(sc, frame->mii_phyaddr, 5);
264: vr_mii_send(sc, frame->mii_regaddr, 5);
265:
266: /* Idle bit */
267: SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
268: DELAY(1);
269: SIO_SET(VR_MIICMD_CLK);
270: DELAY(1);
271:
272: /* Turn off xmit. */
273: SIO_CLR(VR_MIICMD_DIR);
274:
275: /* Check for ack */
276: SIO_CLR(VR_MIICMD_CLK);
277: DELAY(1);
278: ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
279: SIO_SET(VR_MIICMD_CLK);
280: DELAY(1);
281:
282: /*
283: * Now try reading data bits. If the ack failed, we still
284: * need to clock through 16 cycles to keep the PHY(s) in sync.
285: */
286: if (ack) {
287: for(i = 0; i < 16; i++) {
288: SIO_CLR(VR_MIICMD_CLK);
289: DELAY(1);
290: SIO_SET(VR_MIICMD_CLK);
291: DELAY(1);
292: }
293: goto fail;
294: }
295:
296: for (i = 0x8000; i; i >>= 1) {
297: SIO_CLR(VR_MIICMD_CLK);
298: DELAY(1);
299: if (!ack) {
300: if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
301: frame->mii_data |= i;
302: DELAY(1);
303: }
304: SIO_SET(VR_MIICMD_CLK);
305: DELAY(1);
306: }
307:
308: fail:
309:
310: SIO_CLR(VR_MIICMD_CLK);
311: DELAY(1);
312: SIO_SET(VR_MIICMD_CLK);
313: DELAY(1);
314:
315: splx(s);
316:
317: if (ack)
318: return(1);
319: return(0);
320: }
321: #else
322: {
323: int s, i;
324:
325: s = splnet();
326:
327: /* Set the PHY-address */
328: CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)|
329: frame->mii_phyaddr);
330:
331: /* Set the register-address */
332: CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
333: VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);
334:
335: for (i = 0; i < 10000; i++) {
336: if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
337: break;
338: DELAY(1);
339: }
340:
341: frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);
342:
343: splx(s);
344:
345: return(0);
346: }
347: #endif
348:
349:
350: /*
351: * Write to a PHY register through the MII.
352: */
353: int
354: vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame)
355: #ifdef VR_USESWSHIFT
356: {
357: int s;
358:
359: s = splnet();
360:
361: CSR_WRITE_1(sc, VR_MIICMD, 0);
362: VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
363:
364: /*
365: * Set up frame for TX.
366: */
367:
368: frame->mii_stdelim = VR_MII_STARTDELIM;
369: frame->mii_opcode = VR_MII_WRITEOP;
370: frame->mii_turnaround = VR_MII_TURNAROUND;
371:
372: /*
373: * Turn on data output.
374: */
375: SIO_SET(VR_MIICMD_DIR);
376:
377: vr_mii_sync(sc);
378:
379: vr_mii_send(sc, frame->mii_stdelim, 2);
380: vr_mii_send(sc, frame->mii_opcode, 2);
381: vr_mii_send(sc, frame->mii_phyaddr, 5);
382: vr_mii_send(sc, frame->mii_regaddr, 5);
383: vr_mii_send(sc, frame->mii_turnaround, 2);
384: vr_mii_send(sc, frame->mii_data, 16);
385:
386: /* Idle bit. */
387: SIO_SET(VR_MIICMD_CLK);
388: DELAY(1);
389: SIO_CLR(VR_MIICMD_CLK);
390: DELAY(1);
391:
392: /*
393: * Turn off xmit.
394: */
395: SIO_CLR(VR_MIICMD_DIR);
396:
397: splx(s);
398:
399: return(0);
400: }
401: #else
402: {
403: int s, i;
404:
405: s = splnet();
406:
407: /* Set the PHY-address */
408: CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)|
409: frame->mii_phyaddr);
410:
411: /* Set the register-address and data to write */
412: CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
413: CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);
414:
415: VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);
416:
417: for (i = 0; i < 10000; i++) {
418: if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
419: break;
420: DELAY(1);
421: }
422:
423: splx(s);
424:
425: return(0);
426: }
427: #endif
428:
429: int
430: vr_miibus_readreg(struct device *dev, int phy, int reg)
431: {
432: struct vr_softc *sc = (struct vr_softc *)dev;
433: struct vr_mii_frame frame;
434:
435: switch (sc->vr_revid) {
436: case REV_ID_VT6102_APOLLO:
437: case REV_ID_VT6103:
438: if (phy != 1)
439: return 0;
440: default:
441: break;
442: }
443:
444: bzero((char *)&frame, sizeof(frame));
445:
446: frame.mii_phyaddr = phy;
447: frame.mii_regaddr = reg;
448: vr_mii_readreg(sc, &frame);
449:
450: return(frame.mii_data);
451: }
452:
453: void
454: vr_miibus_writereg(struct device *dev, int phy, int reg, int data)
455: {
456: struct vr_softc *sc = (struct vr_softc *)dev;
457: struct vr_mii_frame frame;
458:
459: switch (sc->vr_revid) {
460: case REV_ID_VT6102_APOLLO:
461: case REV_ID_VT6103:
462: if (phy != 1)
463: return;
464: default:
465: break;
466: }
467:
468: bzero((char *)&frame, sizeof(frame));
469:
470: frame.mii_phyaddr = phy;
471: frame.mii_regaddr = reg;
472: frame.mii_data = data;
473:
474: vr_mii_writereg(sc, &frame);
475: }
476:
477: void
478: vr_miibus_statchg(struct device *dev)
479: {
480: struct vr_softc *sc = (struct vr_softc *)dev;
481:
482: vr_setcfg(sc, sc->sc_mii.mii_media_active);
483: }
484:
485: /*
486: * Program the 64-bit multicast hash filter.
487: */
488: void
489: vr_setmulti(struct vr_softc *sc)
490: {
491: struct ifnet *ifp;
492: int h = 0;
493: u_int32_t hashes[2] = { 0, 0 };
494: struct arpcom *ac = &sc->arpcom;
495: struct ether_multi *enm;
496: struct ether_multistep step;
497: u_int8_t rxfilt;
498: int mcnt = 0;
499:
500: ifp = &sc->arpcom.ac_if;
501:
502: rxfilt = CSR_READ_1(sc, VR_RXCFG);
503:
504: if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
505: allmulti:
506: rxfilt |= VR_RXCFG_RX_MULTI;
507: CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
508: CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
509: CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
510: return;
511: }
512:
513: /* first, zot all the existing hash bits */
514: CSR_WRITE_4(sc, VR_MAR0, 0);
515: CSR_WRITE_4(sc, VR_MAR1, 0);
516:
517: /* now program new ones */
518: ETHER_FIRST_MULTI(step, ac, enm);
519: while (enm != NULL) {
520: if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
521: ifp->if_flags |= IFF_ALLMULTI;
522: goto allmulti;
523: }
524: h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
525: if (h < 32)
526: hashes[0] |= (1 << h);
527: else
528: hashes[1] |= (1 << (h - 32));
529: mcnt++;
530:
531: ETHER_NEXT_MULTI(step, enm);
532: }
533:
534: if (mcnt)
535: rxfilt |= VR_RXCFG_RX_MULTI;
536: else
537: rxfilt &= ~VR_RXCFG_RX_MULTI;
538:
539: CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
540: CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
541: CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
542: }
543:
544: /*
545: * In order to fiddle with the
546: * 'full-duplex' and '100Mbps' bits in the netconfig register, we
547: * first have to put the transmit and/or receive logic in the idle state.
548: */
549: void
550: vr_setcfg(struct vr_softc *sc, int media)
551: {
552: int restart = 0;
553:
554: if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
555: restart = 1;
556: VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
557: }
558:
559: if ((media & IFM_GMASK) == IFM_FDX)
560: VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
561: else
562: VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
563:
564: if (restart)
565: VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
566: }
567:
568: void
569: vr_reset(struct vr_softc *sc)
570: {
571: int i;
572:
573: VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
574:
575: for (i = 0; i < VR_TIMEOUT; i++) {
576: DELAY(10);
577: if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
578: break;
579: }
580: if (i == VR_TIMEOUT) {
581: if (sc->vr_revid < REV_ID_VT3065_A)
582: printf("%s: reset never completed!\n",
583: sc->sc_dev.dv_xname);
584: else {
585: #ifdef VR_DEBUG
586: /* Use newer force reset command */
587: printf("%s: Using force reset command.\n",
588: sc->sc_dev.dv_xname);
589: #endif
590: VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
591: }
592: }
593:
594: /* Wait a little while for the chip to get its brains in order. */
595: DELAY(1000);
596: }
597:
598: /*
599: * Probe for a VIA Rhine chip.
600: */
601: int
602: vr_probe(struct device *parent, void *match, void *aux)
603: {
604: return (pci_matchbyid((struct pci_attach_args *)aux, vr_devices,
605: sizeof(vr_devices)/sizeof(vr_devices[0])));
606: }
607:
608: /*
609: * Attach the interface. Allocate softc structures, do ifmedia
610: * setup and ethernet/BPF attach.
611: */
612: void
613: vr_attach(struct device *parent, struct device *self, void *aux)
614: {
615: int i;
616: pcireg_t command;
617: struct vr_softc *sc = (struct vr_softc *)self;
618: struct pci_attach_args *pa = aux;
619: pci_chipset_tag_t pc = pa->pa_pc;
620: pci_intr_handle_t ih;
621: const char *intrstr = NULL;
622: struct ifnet *ifp = &sc->arpcom.ac_if;
623: bus_size_t size;
624: int rseg;
625: caddr_t kva;
626:
627: /*
628: * Handle power management nonsense.
629: */
630: command = pci_conf_read(pa->pa_pc, pa->pa_tag,
631: VR_PCI_CAPID) & 0x000000ff;
632: if (command == 0x01) {
633: command = pci_conf_read(pa->pa_pc, pa->pa_tag,
634: VR_PCI_PWRMGMTCTRL);
635: if (command & VR_PSTATE_MASK) {
636: pcireg_t iobase, membase, irq;
637:
638: /* Save important PCI config data. */
639: iobase = pci_conf_read(pa->pa_pc, pa->pa_tag,
640: VR_PCI_LOIO);
641: membase = pci_conf_read(pa->pa_pc, pa->pa_tag,
642: VR_PCI_LOMEM);
643: irq = pci_conf_read(pa->pa_pc, pa->pa_tag,
644: VR_PCI_INTLINE);
645:
646: /* Reset the power state. */
647: command &= 0xFFFFFFFC;
648: pci_conf_write(pa->pa_pc, pa->pa_tag,
649: VR_PCI_PWRMGMTCTRL, command);
650:
651: /* Restore PCI config data. */
652: pci_conf_write(pa->pa_pc, pa->pa_tag,
653: VR_PCI_LOIO, iobase);
654: pci_conf_write(pa->pa_pc, pa->pa_tag,
655: VR_PCI_LOMEM, membase);
656: pci_conf_write(pa->pa_pc, pa->pa_tag,
657: VR_PCI_INTLINE, irq);
658: }
659: }
660:
661: /*
662: * Map control/status registers.
663: */
664:
665: #ifdef VR_USEIOSPACE
666: if (pci_mapreg_map(pa, VR_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
667: &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) {
668: printf(": failed to map i/o space\n");
669: return;
670: }
671: #else
672: if (pci_mapreg_map(pa, VR_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
673: &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) {
674: printf(": failed to map memory space\n");
675: return;
676: }
677: #endif
678:
679: /* Allocate interrupt */
680: if (pci_intr_map(pa, &ih)) {
681: printf(": couldn't map interrupt\n");
682: goto fail_1;
683: }
684: intrstr = pci_intr_string(pc, ih);
685: sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, vr_intr, sc,
686: self->dv_xname);
687: if (sc->sc_ih == NULL) {
688: printf(": could not establish interrupt");
689: if (intrstr != NULL)
690: printf(" at %s", intrstr);
691: printf("\n");
692: goto fail_1;
693: }
694: printf(": %s", intrstr);
695:
696: sc->vr_revid = PCI_REVISION(pa->pa_class);
697:
698: /*
699: * Windows may put the chip in suspend mode when it
700: * shuts down. Be sure to kick it in the head to wake it
701: * up again.
702: */
703: VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
704:
705: /* Reset the adapter. */
706: vr_reset(sc);
707:
708: /*
709: * Turn on bit2 (MIION) in PCI configuration register 0x53 during
710: * initialization and disable AUTOPOLL.
711: */
712: pci_conf_write(pa->pa_pc, pa->pa_tag, VR_PCI_MODE,
713: pci_conf_read(pa->pa_pc, pa->pa_tag, VR_PCI_MODE) |
714: (VR_MODE3_MIION << 24));
715: VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);
716:
717: /*
718: * Get station address. The way the Rhine chips work,
719: * you're not allowed to directly access the EEPROM once
720: * they've been programmed a special way. Consequently,
721: * we need to read the node address from the PAR0 and PAR1
722: * registers.
723: */
724: VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
725: DELAY(1000);
726: for (i = 0; i < ETHER_ADDR_LEN; i++)
727: sc->arpcom.ac_enaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
728:
729: /*
730: * A Rhine chip was detected. Inform the world.
731: */
732: printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
733:
734: sc->sc_dmat = pa->pa_dmat;
735: if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct vr_list_data),
736: PAGE_SIZE, 0, &sc->sc_listseg, 1, &rseg, BUS_DMA_NOWAIT)) {
737: printf(": can't alloc list\n");
738: goto fail_2;
739: }
740: if (bus_dmamem_map(sc->sc_dmat, &sc->sc_listseg, rseg,
741: sizeof(struct vr_list_data), &kva, BUS_DMA_NOWAIT)) {
742: printf(": can't map dma buffers (%d bytes)\n",
743: sizeof(struct vr_list_data));
744: goto fail_3;
745: }
746: if (bus_dmamap_create(sc->sc_dmat, sizeof(struct vr_list_data), 1,
747: sizeof(struct vr_list_data), 0, BUS_DMA_NOWAIT, &sc->sc_listmap)) {
748: printf(": can't create dma map\n");
749: goto fail_4;
750: }
751: if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, kva,
752: sizeof(struct vr_list_data), NULL, BUS_DMA_NOWAIT)) {
753: printf(": can't load dma map\n");
754: goto fail_5;
755: }
756: sc->vr_ldata = (struct vr_list_data *)kva;
757: bzero(sc->vr_ldata, sizeof(struct vr_list_data));
758:
759: ifp = &sc->arpcom.ac_if;
760: ifp->if_softc = sc;
761: ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
762: ifp->if_ioctl = vr_ioctl;
763: ifp->if_start = vr_start;
764: ifp->if_watchdog = vr_watchdog;
765: ifp->if_baudrate = 10000000;
766: IFQ_SET_READY(&ifp->if_snd);
767: bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
768:
769: /*
770: * Do MII setup.
771: */
772: sc->sc_mii.mii_ifp = ifp;
773: sc->sc_mii.mii_readreg = vr_miibus_readreg;
774: sc->sc_mii.mii_writereg = vr_miibus_writereg;
775: sc->sc_mii.mii_statchg = vr_miibus_statchg;
776: ifmedia_init(&sc->sc_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts);
777: mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
778: 0);
779: if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
780: ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
781: ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
782: } else
783: ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
784: timeout_set(&sc->sc_to, vr_tick, sc);
785:
786: /*
787: * Call MI attach routines.
788: */
789: if_attach(ifp);
790: ether_ifattach(ifp);
791:
792: shutdownhook_establish(vr_shutdown, sc);
793: return;
794:
795: fail_5:
796: bus_dmamap_destroy(sc->sc_dmat, sc->sc_listmap);
797:
798: fail_4:
799: bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct vr_list_data));
800:
801: fail_3:
802: bus_dmamem_free(sc->sc_dmat, &sc->sc_listseg, rseg);
803:
804: fail_2:
805: pci_intr_disestablish(pc, sc->sc_ih);
806:
807: fail_1:
808: bus_space_unmap(sc->vr_btag, sc->vr_bhandle, size);
809: }
810:
811: /*
812: * Initialize the transmit descriptors.
813: */
814: int
815: vr_list_tx_init(struct vr_softc *sc)
816: {
817: struct vr_chain_data *cd;
818: struct vr_list_data *ld;
819: int i;
820:
821: cd = &sc->vr_cdata;
822: ld = sc->vr_ldata;
823: for (i = 0; i < VR_TX_LIST_CNT; i++) {
824: cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
825: cd->vr_tx_chain[i].vr_paddr =
826: sc->sc_listmap->dm_segs[0].ds_addr +
827: offsetof(struct vr_list_data, vr_tx_list[i]);
828:
829: if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
830: MCLBYTES, 0, BUS_DMA_NOWAIT, &cd->vr_tx_chain[i].vr_map))
831: return (ENOBUFS);
832:
833: if (i == (VR_TX_LIST_CNT - 1))
834: cd->vr_tx_chain[i].vr_nextdesc =
835: &cd->vr_tx_chain[0];
836: else
837: cd->vr_tx_chain[i].vr_nextdesc =
838: &cd->vr_tx_chain[i + 1];
839: }
840:
841: cd->vr_tx_cons = cd->vr_tx_prod = &cd->vr_tx_chain[0];
842:
843: return (0);
844: }
845:
846:
847: /*
848: * Initialize the RX descriptors and allocate mbufs for them. Note that
849: * we arrange the descriptors in a closed ring, so that the last descriptor
850: * points back to the first.
851: */
852: int
853: vr_list_rx_init(struct vr_softc *sc)
854: {
855: struct vr_chain_data *cd;
856: struct vr_list_data *ld;
857: int i;
858: struct vr_desc *d;
859:
860: cd = &sc->vr_cdata;
861: ld = sc->vr_ldata;
862:
863: for (i = 0; i < VR_RX_LIST_CNT; i++) {
864: d = (struct vr_desc *)&ld->vr_rx_list[i];
865: cd->vr_rx_chain[i].vr_ptr = d;
866: cd->vr_rx_chain[i].vr_paddr =
867: sc->sc_listmap->dm_segs[0].ds_addr +
868: offsetof(struct vr_list_data, vr_rx_list[i]);
869: cd->vr_rx_chain[i].vr_buf =
870: (u_int8_t *)malloc(MCLBYTES, M_DEVBUF, M_NOWAIT);
871: if (cd->vr_rx_chain[i].vr_buf == NULL)
872: return (ENOBUFS);
873:
874: if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
875: 0, BUS_DMA_NOWAIT | BUS_DMA_READ,
876: &cd->vr_rx_chain[i].vr_map))
877: return (ENOBUFS);
878:
879: if (bus_dmamap_load(sc->sc_dmat, cd->vr_rx_chain[i].vr_map,
880: cd->vr_rx_chain[i].vr_buf, MCLBYTES, NULL, BUS_DMA_NOWAIT))
881: return (ENOBUFS);
882: bus_dmamap_sync(sc->sc_dmat, cd->vr_rx_chain[i].vr_map,
883: 0, cd->vr_rx_chain[i].vr_map->dm_mapsize,
884: BUS_DMASYNC_PREREAD);
885:
886: d->vr_status = htole32(VR_RXSTAT);
887: d->vr_data =
888: htole32(cd->vr_rx_chain[i].vr_map->dm_segs[0].ds_addr +
889: sizeof(u_int64_t));
890: d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN);
891:
892: if (i == (VR_RX_LIST_CNT - 1)) {
893: cd->vr_rx_chain[i].vr_nextdesc =
894: &cd->vr_rx_chain[0];
895: ld->vr_rx_list[i].vr_next =
896: htole32(sc->sc_listmap->dm_segs[0].ds_addr +
897: offsetof(struct vr_list_data, vr_rx_list[0]));
898: } else {
899: cd->vr_rx_chain[i].vr_nextdesc =
900: &cd->vr_rx_chain[i + 1];
901: ld->vr_rx_list[i].vr_next =
902: htole32(sc->sc_listmap->dm_segs[0].ds_addr +
903: offsetof(struct vr_list_data, vr_rx_list[i + 1]));
904: }
905: }
906:
907: cd->vr_rx_head = &cd->vr_rx_chain[0];
908:
909: bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
910: sc->sc_listmap->dm_mapsize,
911: BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
912:
913: return(0);
914: }
915:
916: /*
917: * A frame has been uploaded: pass the resulting mbuf chain up to
918: * the higher level protocols.
919: */
920: void
921: vr_rxeof(struct vr_softc *sc)
922: {
923: struct mbuf *m0;
924: struct ifnet *ifp;
925: struct vr_chain_onefrag *cur_rx;
926: int total_len = 0;
927: u_int32_t rxstat;
928:
929: ifp = &sc->arpcom.ac_if;
930:
931: for (;;) {
932:
933: bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
934: 0, sc->sc_listmap->dm_mapsize,
935: BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
936: rxstat = letoh32(sc->vr_cdata.vr_rx_head->vr_ptr->vr_status);
937: if (rxstat & VR_RXSTAT_OWN)
938: break;
939:
940: m0 = NULL;
941: cur_rx = sc->vr_cdata.vr_rx_head;
942: sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
943:
944: /*
945: * If an error occurs, update stats, clear the
946: * status word and leave the mbuf cluster in place:
947: * it should simply get re-used next time this descriptor
948: * comes up in the ring.
949: */
950: if (rxstat & VR_RXSTAT_RXERR) {
951: ifp->if_ierrors++;
952: #ifdef VR_DEBUG
953: printf("%s: rx error (%02x):",
954: sc->sc_dev.dv_xname, rxstat & 0x000000ff);
955: if (rxstat & VR_RXSTAT_CRCERR)
956: printf(" crc error");
957: if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
958: printf(" frame alignment error");
959: if (rxstat & VR_RXSTAT_FIFOOFLOW)
960: printf(" FIFO overflow");
961: if (rxstat & VR_RXSTAT_GIANT)
962: printf(" received giant packet");
963: if (rxstat & VR_RXSTAT_RUNT)
964: printf(" received runt packet");
965: if (rxstat & VR_RXSTAT_BUSERR)
966: printf(" system bus error");
967: if (rxstat & VR_RXSTAT_BUFFERR)
968: printf(" rx buffer error");
969: printf("\n");
970: #endif
971:
972: /* Reinitialize descriptor */
973: cur_rx->vr_ptr->vr_status = htole32(VR_RXSTAT);
974: cur_rx->vr_ptr->vr_data =
975: htole32(cur_rx->vr_map->dm_segs[0].ds_addr +
976: sizeof(u_int64_t));
977: cur_rx->vr_ptr->vr_ctl = htole32(VR_RXCTL | VR_RXLEN);
978: bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
979: 0, sc->sc_listmap->dm_mapsize,
980: BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
981: continue;
982: }
983:
984: /* No errors; receive the packet. */
985: total_len = VR_RXBYTES(letoh32(cur_rx->vr_ptr->vr_status));
986:
987: /*
988: * XXX The VIA Rhine chip includes the CRC with every
989: * received frame, and there's no way to turn this
990: * behavior off (at least, I can't find anything in
991: * the manual that explains how to do it) so we have
992: * to trim off the CRC manually.
993: */
994: total_len -= ETHER_CRC_LEN;
995:
996: bus_dmamap_sync(sc->sc_dmat, cur_rx->vr_map, 0,
997: cur_rx->vr_map->dm_mapsize,
998: BUS_DMASYNC_POSTREAD);
999: m0 = m_devget(cur_rx->vr_buf + sizeof(u_int64_t) - ETHER_ALIGN,
1000: total_len + ETHER_ALIGN, 0, ifp, NULL);
1001: bus_dmamap_sync(sc->sc_dmat, cur_rx->vr_map, 0,
1002: cur_rx->vr_map->dm_mapsize,
1003: BUS_DMASYNC_PREREAD);
1004:
1005: /* Reinitialize descriptor */
1006: cur_rx->vr_ptr->vr_status = htole32(VR_RXSTAT);
1007: cur_rx->vr_ptr->vr_data =
1008: htole32(cur_rx->vr_map->dm_segs[0].ds_addr +
1009: sizeof(u_int64_t));
1010: cur_rx->vr_ptr->vr_ctl = htole32(VR_RXCTL | VR_RXLEN);
1011: bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
1012: sc->sc_listmap->dm_mapsize,
1013: BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1014:
1015: if (m0 == NULL) {
1016: ifp->if_ierrors++;
1017: continue;
1018: }
1019: m_adj(m0, ETHER_ALIGN);
1020:
1021: ifp->if_ipackets++;
1022:
1023: #if NBPFILTER > 0
1024: /*
1025: * Handle BPF listeners. Let the BPF user see the packet.
1026: */
1027: if (ifp->if_bpf)
1028: bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_IN);
1029: #endif
1030: /* pass it on. */
1031: ether_input_mbuf(ifp, m0);
1032: }
1033:
1034: bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
1035: 0, sc->sc_listmap->dm_mapsize,
1036: BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1037: }
1038:
1039: void
1040: vr_rxeoc(struct vr_softc *sc)
1041: {
1042: struct ifnet *ifp;
1043: int i;
1044:
1045: ifp = &sc->arpcom.ac_if;
1046:
1047: ifp->if_ierrors++;
1048:
1049: VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1050: DELAY(10000);
1051:
1052: for (i = 0x400;
1053: i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON);
1054: i--)
1055: ; /* Wait for receiver to stop */
1056:
1057: if (!i) {
1058: printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname);
1059: sc->vr_flags |= VR_F_RESTART;
1060: return;
1061: }
1062:
1063: vr_rxeof(sc);
1064:
1065: CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_head->vr_paddr);
1066: VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
1067: VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
1068: }
1069:
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

void
vr_txeof(struct vr_softc *sc)
{
	struct vr_chain *cur_tx;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	cur_tx = sc->vr_cdata.vr_tx_cons;
	while(cur_tx->vr_mbuf != NULL) {
		u_int32_t txstat;
		int i;

		txstat = letoh32(cur_tx->vr_ptr->vr_status);

		/*
		 * Abort/underflow means the transmitter has stalled on
		 * this descriptor: wait for the TX engine to go idle,
		 * give the descriptor back to the chip and restart
		 * transmission from it.
		 */
		if ((txstat & VR_TXSTAT_ABRT) ||
		    (txstat & VR_TXSTAT_UDF)) {
			for (i = 0x400;
			    i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON);
			    i--)
				;	/* Wait for chip to shutdown */
			if (!i) {
				/*
				 * TX engine never went idle; flag a full
				 * chip restart, performed by vr_tick().
				 */
				printf("%s: tx shutdown timeout\n",
				    sc->sc_dev.dv_xname);
				sc->vr_flags |= VR_F_RESTART;
				break;
			}
			VR_TXOWN(cur_tx) = htole32(VR_TXSTAT_OWN);
			CSR_WRITE_4(sc, VR_TXADDR, cur_tx->vr_paddr);
			break;
		}

		/* Descriptor still owned by the chip: nothing more done. */
		if (txstat & VR_TXSTAT_OWN)
			break;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		/* Add the per-frame collision count from the status word. */
		ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		if (cur_tx->vr_map != NULL && cur_tx->vr_map->dm_nsegs > 0)
			bus_dmamap_unload(sc->sc_dmat, cur_tx->vr_map);

		m_freem(cur_tx->vr_mbuf);
		cur_tx->vr_mbuf = NULL;
		/* A descriptor was freed, so output can make progress. */
		ifp->if_flags &= ~IFF_OACTIVE;

		cur_tx = cur_tx->vr_nextdesc;
	}

	sc->vr_cdata.vr_tx_cons = cur_tx;
	/* Ring fully drained: cancel the transmit watchdog. */
	if (cur_tx->vr_mbuf == NULL)
		ifp->if_timer = 0;
}
1139:
1140: void
1141: vr_tick(void *xsc)
1142: {
1143: struct vr_softc *sc = xsc;
1144: int s;
1145:
1146: s = splnet();
1147: if (sc->vr_flags & VR_F_RESTART) {
1148: printf("%s: restarting\n", sc->sc_dev.dv_xname);
1149: vr_stop(sc);
1150: vr_reset(sc);
1151: vr_init(sc);
1152: sc->vr_flags &= ~VR_F_RESTART;
1153: }
1154:
1155: mii_tick(&sc->sc_mii);
1156: timeout_add(&sc->sc_to, hz);
1157: splx(s);
1158: }
1159:
/*
 * Interrupt handler: acknowledges and services every pending interrupt
 * source in a loop, then restarts output if packets are waiting.
 * Returns non-zero if the interrupt was claimed.
 */
int
vr_intr(void *arg)
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int claimed = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts. */
	if (!(ifp->if_flags & IFF_UP)) {
		vr_stop(sc);
		return 0;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	/* Service events until the status register shows none pending. */
	for (;;) {

		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);	/* ack them */

		if ((status & VR_INTRS) == 0)
			break;

		claimed = 1;

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status & VR_ISR_RX_DROPPED) {
#ifdef VR_DEBUG
			printf("%s: rx packet lost\n", sc->sc_dev.dv_xname);
#endif
			ifp->if_ierrors++;
		}

		/* Receiver error/stall: drain the ring and restart RX. */
		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
		    (status & VR_ISR_RX_OFLOW)) {
#ifdef VR_DEBUG
			printf("%s: receive error (%04x)",
			    sc->sc_dev.dv_xname, status);
			if (status & VR_ISR_RX_NOBUF)
				printf(" no buffers");
			if (status & VR_ISR_RX_OFLOW)
				printf(" overflow");
			printf("\n");
#endif
			vr_rxeoc(sc);
		}

		/* Fatal conditions: reinitialize the whole chip. */
		if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
#ifdef VR_DEBUG
			if (status & VR_ISR_BUSERR)
				printf("%s: PCI bus error\n",
				    sc->sc_dev.dv_xname);
			if (status & VR_ISR_TX_UNDERRUN)
				printf("%s: transmit underrun\n",
				    sc->sc_dev.dv_xname);
#endif
			vr_reset(sc);
			vr_init(sc);
			break;
		}

		/* Transmit completions and transmit-side errors. */
		if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
		    (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
			vr_txeof(sc);
			if ((status & VR_ISR_UDFI) ||
			    (status & VR_ISR_TX_ABRT2) ||
			    (status & VR_ISR_TX_ABRT)) {
#ifdef VR_DEBUG
				if (status & (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2))
					printf("%s: transmit aborted\n",
					    sc->sc_dev.dv_xname);
				if (status & VR_ISR_UDFI)
					printf("%s: transmit underflow\n",
					    sc->sc_dev.dv_xname);
#endif
				ifp->if_oerrors++;
				/*
				 * Kick the transmitter again if frames are
				 * still queued on the ring after cleanup.
				 */
				if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) {
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_ON);
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_GO);
				}
			}
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vr_start(ifp);

	return (claimed);
}
1262:
1263: /*
1264: * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1265: * pointers to the fragment pointers.
1266: */
1267: int
1268: vr_encap(struct vr_softc *sc, struct vr_chain *c, struct mbuf *m_head)
1269: {
1270: struct vr_desc *f = NULL;
1271: struct mbuf *m_new = NULL;
1272:
1273: MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1274: if (m_new == NULL)
1275: return (1);
1276: if (m_head->m_pkthdr.len > MHLEN) {
1277: MCLGET(m_new, M_DONTWAIT);
1278: if (!(m_new->m_flags & M_EXT)) {
1279: m_freem(m_new);
1280: return (1);
1281: }
1282: }
1283: m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
1284: m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1285:
1286: /*
1287: * The Rhine chip doesn't auto-pad, so we have to make
1288: * sure to pad short frames out to the minimum frame length
1289: * ourselves.
1290: */
1291: if (m_new->m_len < VR_MIN_FRAMELEN) {
1292: /* data field should be padded with octets of zero */
1293: bzero(&m_new->m_data[m_new->m_len],
1294: VR_MIN_FRAMELEN-m_new->m_len);
1295: m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
1296: m_new->m_len = m_new->m_pkthdr.len;
1297: }
1298:
1299: if (bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m_new,
1300: BUS_DMA_NOWAIT | BUS_DMA_WRITE)) {
1301: m_freem(m_new);
1302: return (1);
1303: }
1304: bus_dmamap_sync(sc->sc_dmat, c->vr_map, 0, c->vr_map->dm_mapsize,
1305: BUS_DMASYNC_PREWRITE);
1306:
1307: m_freem(m_head);
1308:
1309: c->vr_mbuf = m_new;
1310:
1311: f = c->vr_ptr;
1312: f->vr_data = htole32(c->vr_map->dm_segs[0].ds_addr);
1313: f->vr_ctl = htole32(c->vr_map->dm_mapsize);
1314: f->vr_ctl |= htole32(VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG);
1315: f->vr_status = htole32(0);
1316:
1317: f->vr_ctl |= htole32(VR_TXCTL_LASTFRAG|VR_TXCTL_FINT);
1318: f->vr_next = htole32(c->vr_nextdesc->vr_paddr);
1319:
1320: return (0);
1321: }
1322:
/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

void
vr_start(struct ifnet *ifp)
{
	struct vr_softc *sc;
	struct mbuf *m_head;
	struct vr_chain *cur_tx;

	/* The ring is full; vr_txeof() will clear OACTIVE when it drains. */
	if (ifp->if_flags & IFF_OACTIVE)
		return;

	sc = ifp->if_softc;

	/* Fill free descriptors (vr_mbuf == NULL) from the send queue. */
	cur_tx = sc->vr_cdata.vr_tx_prod;
	while (cur_tx->vr_mbuf == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pack the data into the descriptor. */
		if (vr_encap(sc, cur_tx, m_head)) {
			/* Rollback, send what we were able to encap. */
			/*
			 * A dequeued mbuf cannot be returned to an ALTQ
			 * queue, so it has to be dropped in that case.
			 */
			if (ALTQ_IS_ENABLED(&ifp->if_snd))
				m_freem(m_head);
			else
				IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		/* Hand the descriptor over to the chip. */
		VR_TXOWN(cur_tx) = htole32(VR_TXSTAT_OWN);

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->vr_mbuf,
			    BPF_DIRECTION_OUT);
#endif
		cur_tx = cur_tx->vr_nextdesc;
	}
	/* If anything was queued (or the ring filled up), kick the chip. */
	if (cur_tx != sc->vr_cdata.vr_tx_prod || cur_tx->vr_mbuf != NULL) {
		sc->vr_cdata.vr_tx_prod = cur_tx;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
		    sc->sc_listmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

		/* Tell the chip to start transmitting. */
		VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;

		/* Ring completely full: stall output until vr_txeof(). */
		if (cur_tx->vr_mbuf != NULL)
			ifp->if_flags |= IFF_OACTIVE;
	}
}
1388:
/*
 * Initialize (or reinitialize) the hardware and mark the interface
 * running: resets the chip, programs the station address, DMA and FIFO
 * thresholds and the RX filter, rebuilds the RX/TX descriptor rings,
 * enables the receiver, transmitter and interrupts, and starts the
 * one-second tick.  Runs at splnet().
 */
void
vr_init(void *xsc)
{
	struct vr_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	int s, i;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vr_stop(sc);
	vr_reset(sc);

	/*
	 * Set our station address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]);

	/* Set DMA size */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		vr_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	if (vr_list_tx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for tx buffers\n",
		    sc->sc_dev.dv_xname);
		vr_stop(sc);
		splx(s);
		return;
	}

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/*
	 * Program the multicast filter, if necessary.
	 */
	vr_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_head->vr_paddr);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
	    VR_CMD_TX_ON|VR_CMD_RX_ON|
	    VR_CMD_RX_GO);

	/* Point the chip at the start of the TX descriptor ring. */
	CSR_WRITE_4(sc, VR_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct vr_list_data, vr_tx_list[0]));

	/*
	 * Enable interrupts (clearing any stale pending ones first).
	 */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	/* Restore state of BMCR */
	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the one-second tick unless it is already scheduled. */
	if (!timeout_pending(&sc->sc_to))
		timeout_add(&sc->sc_to, hz);

	splx(s);
}
1498:
1499: /*
1500: * Set media options.
1501: */
1502: int
1503: vr_ifmedia_upd(struct ifnet *ifp)
1504: {
1505: struct vr_softc *sc = ifp->if_softc;
1506:
1507: if (ifp->if_flags & IFF_UP)
1508: vr_init(sc);
1509:
1510: return (0);
1511: }
1512:
1513: /*
1514: * Report current media status.
1515: */
1516: void
1517: vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1518: {
1519: struct vr_softc *sc = ifp->if_softc;
1520: struct mii_data *mii = &sc->sc_mii;
1521:
1522: mii_pollstat(mii);
1523: ifmr->ifm_active = mii->mii_media_active;
1524: ifmr->ifm_status = mii->mii_media_status;
1525: }
1526:
/*
 * Interface ioctl handler.  Runs at splnet(); returns 0 on success or
 * an errno value (ENOTTY for unknown commands).
 */
int
vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vr_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;
	struct ifaddr *ifa = (struct ifaddr *)data;

	s = splnet();

	/* Let the generic ethernet layer have first crack at it. */
	if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
		splx(s);
		return error;
	}

	switch(command) {
	case SIOCSIFADDR:
		/* Assigning an address implicitly brings the interface up. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			vr_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->arpcom, ifa);
#endif
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Handle promiscuous/allmulti transitions without a
			 * full reinit by reprogramming only the RX filter.
			 * sc_if_flags holds the flag state from the last
			 * call, so transitions can be detected.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->sc_if_flags & IFF_PROMISC)) {
				/* Promiscuous mode just switched on. */
				VR_SETBIT(sc, VR_RXCFG,
				    VR_RXCFG_RX_PROMISC);
				vr_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->sc_if_flags & IFF_PROMISC) {
				/* Promiscuous mode just switched off. */
				VR_CLRBIT(sc, VR_RXCFG,
				    VR_RXCFG_RX_PROMISC);
				vr_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->sc_if_flags) & IFF_ALLMULTI) {
				/* ALLMULTI toggled: refresh the filter. */
				vr_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					vr_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vr_stop(sc);
		}
		/* Remember the flags for the next transition check. */
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->arpcom) :
		    ether_delmulti(ifr, &sc->arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				vr_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		error = ENOTTY;
		break;
	}

	splx(s);

	return(error);
}
1608:
1609: void
1610: vr_watchdog(struct ifnet *ifp)
1611: {
1612: struct vr_softc *sc;
1613:
1614: sc = ifp->if_softc;
1615:
1616: ifp->if_oerrors++;
1617: printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1618:
1619: vr_stop(sc);
1620: vr_reset(sc);
1621: vr_init(sc);
1622:
1623: if (!IFQ_IS_EMPTY(&ifp->if_snd))
1624: vr_start(ifp);
1625: }
1626:
1627: /*
1628: * Stop the adapter and free any mbufs allocated to the
1629: * RX and TX lists.
1630: */
1631: void
1632: vr_stop(struct vr_softc *sc)
1633: {
1634: int i;
1635: struct ifnet *ifp;
1636: bus_dmamap_t map;
1637:
1638: ifp = &sc->arpcom.ac_if;
1639: ifp->if_timer = 0;
1640:
1641: if (timeout_pending(&sc->sc_to))
1642: timeout_del(&sc->sc_to);
1643:
1644: ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1645:
1646: VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1647: VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1648: CSR_WRITE_2(sc, VR_IMR, 0x0000);
1649: CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1650: CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1651:
1652: /*
1653: * Free data in the RX lists.
1654: */
1655: for (i = 0; i < VR_RX_LIST_CNT; i++) {
1656:
1657: if (sc->vr_cdata.vr_rx_chain[i].vr_buf != NULL) {
1658: free(sc->vr_cdata.vr_rx_chain[i].vr_buf, M_DEVBUF);
1659: sc->vr_cdata.vr_rx_chain[i].vr_buf = NULL;
1660: }
1661:
1662: map = sc->vr_cdata.vr_rx_chain[i].vr_map;
1663: if (map != NULL) {
1664: if (map->dm_nsegs > 0)
1665: bus_dmamap_unload(sc->sc_dmat, map);
1666: bus_dmamap_destroy(sc->sc_dmat, map);
1667: sc->vr_cdata.vr_rx_chain[i].vr_map = NULL;
1668: }
1669: }
1670: bzero((char *)&sc->vr_ldata->vr_rx_list,
1671: sizeof(sc->vr_ldata->vr_rx_list));
1672:
1673: /*
1674: * Free the TX list buffers.
1675: */
1676: for (i = 0; i < VR_TX_LIST_CNT; i++) {
1677: bus_dmamap_t map;
1678:
1679: if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
1680: m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
1681: sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
1682: }
1683: map = sc->vr_cdata.vr_tx_chain[i].vr_map;
1684: if (map != NULL) {
1685: if (map->dm_nsegs > 0)
1686: bus_dmamap_unload(sc->sc_dmat, map);
1687: bus_dmamap_destroy(sc->sc_dmat, map);
1688: sc->vr_cdata.vr_tx_chain[i].vr_map = NULL;
1689: }
1690: }
1691:
1692: bzero((char *)&sc->vr_ldata->vr_tx_list,
1693: sizeof(sc->vr_ldata->vr_tx_list));
1694: }
1695:
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
void
vr_shutdown(void *arg)
{
	vr_stop((struct vr_softc *)arg);
}