Annotation of sys/dev/pci/if_bge.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: if_bge.c,v 1.213 2007/06/21 01:11:50 dlg Exp $ */
2:
3: /*
4: * Copyright (c) 2001 Wind River Systems
5: * Copyright (c) 1997, 1998, 1999, 2001
6: * Bill Paul <wpaul@windriver.com>. All rights reserved.
7: *
8: * Redistribution and use in source and binary forms, with or without
9: * modification, are permitted provided that the following conditions
10: * are met:
11: * 1. Redistributions of source code must retain the above copyright
12: * notice, this list of conditions and the following disclaimer.
13: * 2. Redistributions in binary form must reproduce the above copyright
14: * notice, this list of conditions and the following disclaimer in the
15: * documentation and/or other materials provided with the distribution.
16: * 3. All advertising materials mentioning features or use of this software
17: * must display the following acknowledgement:
18: * This product includes software developed by Bill Paul.
19: * 4. Neither the name of the author nor the names of any co-contributors
20: * may be used to endorse or promote products derived from this software
21: * without specific prior written permission.
22: *
23: * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26: * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33: * THE POSSIBILITY OF SUCH DAMAGE.
34: *
35: * $FreeBSD: if_bge.c,v 1.25 2002/11/14 23:54:49 sam Exp $
36: */
37:
38: /*
39: * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
40: *
41: * Written by Bill Paul <wpaul@windriver.com>
42: * Senior Engineer, Wind River Systems
43: */
44:
45: /*
46: * The Broadcom BCM5700 is based on technology originally developed by
47: * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
48: * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
49: * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
50: * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, Jumbo
51: * frames, highly configurable RX filtering, and 16 RX and TX queues
52: * (which, along with RX filter rules, can be used for QOS applications).
53: * Other features, such as TCP segmentation, may be available as part
54: * of value-added firmware updates. Unlike the Tigon I and Tigon II,
55: * firmware images can be stored in hardware and need not be compiled
56: * into the driver.
57: *
58: * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
59: * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
60: *
61: * The BCM5701 is a single-chip solution incorporating both the BCM5700
62: * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
63: * does not support external SSRAM.
64: *
65: * Broadcom also produces a variation of the BCM5700 under the "Altima"
66: * brand name, which is functionally similar but lacks PCI-X support.
67: *
68: * Without external SSRAM, you can have at most 4 TX rings,
69: * and the use of the mini RX ring is disabled. This seems to imply
70: * that these features are simply not available on the BCM5701. As a
71: * result, this driver does not implement any support for the mini RX
72: * ring.
73: */
74:
75: #include "bpfilter.h"
76: #include "vlan.h"
77:
78: #include <sys/param.h>
79: #include <sys/systm.h>
80: #include <sys/sockio.h>
81: #include <sys/mbuf.h>
82: #include <sys/malloc.h>
83: #include <sys/kernel.h>
84: #include <sys/device.h>
85: #include <sys/timeout.h>
86: #include <sys/socket.h>
87:
88: #include <net/if.h>
89: #include <net/if_dl.h>
90: #include <net/if_media.h>
91:
92: #ifdef INET
93: #include <netinet/in.h>
94: #include <netinet/in_systm.h>
95: #include <netinet/in_var.h>
96: #include <netinet/ip.h>
97: #include <netinet/if_ether.h>
98: #endif
99:
100: #if NVLAN > 0
101: #include <net/if_types.h>
102: #include <net/if_vlan_var.h>
103: #endif
104:
105: #if NBPFILTER > 0
106: #include <net/bpf.h>
107: #endif
108:
109: #ifdef __sparc64__
110: #include <dev/ofw/openfirm.h>
111: #endif
112:
113: #include <dev/pci/pcireg.h>
114: #include <dev/pci/pcivar.h>
115: #include <dev/pci/pcidevs.h>
116:
117: #include <dev/mii/mii.h>
118: #include <dev/mii/miivar.h>
119: #include <dev/mii/miidevs.h>
120: #include <dev/mii/brgphyreg.h>
121:
122: #include <dev/pci/if_bgereg.h>
123:
124: const struct bge_revision * bge_lookup_rev(u_int32_t);
125: int bge_probe(struct device *, void *, void *);
126: void bge_attach(struct device *, struct device *, void *);
127:
128: struct cfattach bge_ca = {
129: sizeof(struct bge_softc), bge_probe, bge_attach
130: };
131:
132: struct cfdriver bge_cd = {
133: 0, "bge", DV_IFNET
134: };
135:
136: void bge_txeof(struct bge_softc *);
137: void bge_rxeof(struct bge_softc *);
138:
139: void bge_tick(void *);
140: void bge_stats_update(struct bge_softc *);
141: void bge_stats_update_regs(struct bge_softc *);
142: int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);
143: int bge_compact_dma_runt(struct mbuf *pkt);
144:
145: int bge_intr(void *);
146: void bge_start(struct ifnet *);
147: int bge_ioctl(struct ifnet *, u_long, caddr_t);
148: void bge_init(void *);
149: void bge_power(int, void *);
150: void bge_stop_block(struct bge_softc *, bus_size_t, u_int32_t);
151: void bge_stop(struct bge_softc *);
152: void bge_watchdog(struct ifnet *);
153: void bge_shutdown(void *);
154: int bge_ifmedia_upd(struct ifnet *);
155: void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
156:
157: u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
158: int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
159:
160: void bge_iff(struct bge_softc *);
161:
162: int bge_alloc_jumbo_mem(struct bge_softc *);
163: void *bge_jalloc(struct bge_softc *);
164: void bge_jfree(caddr_t, u_int, void *);
165: int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t);
166: int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
167: int bge_init_rx_ring_std(struct bge_softc *);
168: void bge_free_rx_ring_std(struct bge_softc *);
169: int bge_init_rx_ring_jumbo(struct bge_softc *);
170: void bge_free_rx_ring_jumbo(struct bge_softc *);
171: void bge_free_tx_ring(struct bge_softc *);
172: int bge_init_tx_ring(struct bge_softc *);
173:
174: void bge_chipinit(struct bge_softc *);
175: int bge_blockinit(struct bge_softc *);
176:
177: u_int32_t bge_readmem_ind(struct bge_softc *, int);
178: void bge_writemem_ind(struct bge_softc *, int, int);
179: void bge_writereg_ind(struct bge_softc *, int, int);
180:
181: int bge_miibus_readreg(struct device *, int, int);
182: void bge_miibus_writereg(struct device *, int, int, int);
183: void bge_miibus_statchg(struct device *);
184:
185: void bge_reset(struct bge_softc *);
186: void bge_link_upd(struct bge_softc *);
187:
188: #ifdef BGE_DEBUG
189: #define DPRINTF(x) do { if (bgedebug) printf x; } while (0)
190: #define DPRINTFN(n,x) do { if (bgedebug >= (n)) printf x; } while (0)
191: int bgedebug = 0;
192: #else
193: #define DPRINTF(x)
194: #define DPRINTFN(n,x)
195: #endif
196:
197: /*
198: * Various supported device vendors/types and their names. Note: the
199: * spec seems to indicate that the hardware still has Alteon's vendor
200: * ID burned into it, though it will always be overridden by the vendor
201: * ID in the EEPROM. Just to be safe, we cover all possibilities.
202: */
203: const struct pci_matchid bge_devices[] = {
204: { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700 },
205: { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701 },
206:
207: { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000 },
208: { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001 },
209: { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100 },
210:
211: { PCI_VENDOR_APPLE, PCI_PRODUCT_APPLE_BCM5701 },
212:
213: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700 },
214: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701 },
215: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702 },
216: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702_ALT },
217: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X },
218: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703 },
219: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703_ALT },
220: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X },
221: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C },
222: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S },
223: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S_ALT },
224: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705 },
225: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705F },
226: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705K },
227: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M },
228: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M_ALT },
229: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714 },
230: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5714S },
231: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715 },
232: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5715S },
233: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5720 },
234: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5721 },
235: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5722 },
236: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750 },
237: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5750M },
238: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751 },
239: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751F },
240: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5751M },
241: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752 },
242: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5752M },
243: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753 },
244: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753F },
245: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5753M },
246: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754 },
247: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5754M },
248: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755 },
249: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5755M },
250: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5756 },
251: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780 },
252: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5780S },
253: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5781 },
254: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782 },
255: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5786 },
256: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787 },
257: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787F },
258: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5787M },
259: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5788 },
260: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5789 },
261: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901 },
262: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2 },
263: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5903M },
264: #if 0
265: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906 },
266: { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5906M },
267: #endif
268:
269: { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9D21 },
270:
271: { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C996 },
272: };
273:
274: #define BGE_IS_5705_OR_BEYOND(sc) \
275: (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 || \
276: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 || \
277: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 || \
278: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780 || \
279: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714 || \
280: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || \
281: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || \
282: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787 || \
283: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
284:
285: #define BGE_IS_575X_PLUS(sc) \
286: (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 || \
287: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 || \
288: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780 || \
289: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714 || \
290: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || \
291: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || \
292: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787 || \
293: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
294:
295: #define BGE_IS_5714_FAMILY(sc) \
296: (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 || \
297: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780 || \
298: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714)
299:
300: #define BGE_IS_JUMBO_CAPABLE(sc) \
301: (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 || \
302: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 || \
303: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 || \
304: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
305:
306:
307: static const struct bge_revision {
308: u_int32_t br_chipid;
309: const char *br_name;
310: } bge_revisions[] = {
311: { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" },
312: { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" },
313: { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" },
314: { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" },
315: { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" },
316: { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" },
317: { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" },
318: { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" },
319: { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" },
320: { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" },
321: { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" },
322: { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" },
323: { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" },
324: { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" },
325: { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" },
326: { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" },
327: { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" },
328: { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" },
329: { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" },
330: { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" },
331: { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" },
332: { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" },
333: { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" },
334: { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" },
335: { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" },
336: { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" },
337: { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" },
338: { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" },
339: { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" },
340: { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" },
341: { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" },
342: { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" },
343: { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" },
344: { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" },
345: { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" },
346: { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" },
347: { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" },
348: { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" },
349: { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" },
350: { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" },
351: { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" },
352: { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" },
353: { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" },
354: { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" },
355: { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" },
356: { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" },
357: /* the 5754 and 5787 share the same ASIC ID */
358: { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" },
359: { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" },
360: { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" },
361: { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" },
362:
363: { 0, NULL }
364: };
365:
366: /*
367: * Some defaults for major revisions, so that newer steppings
368: * that we don't know about have a shot at working.
369: */
370: static const struct bge_revision bge_majorrevs[] = {
371: { BGE_ASICREV_BCM5700, "unknown BCM5700" },
372: { BGE_ASICREV_BCM5701, "unknown BCM5701" },
373: /* 5702 and 5703 share the same ASIC ID */
374: { BGE_ASICREV_BCM5703, "unknown BCM5703" },
375: { BGE_ASICREV_BCM5704, "unknown BCM5704" },
376: { BGE_ASICREV_BCM5705, "unknown BCM5705" },
377: { BGE_ASICREV_BCM5750, "unknown BCM5750" },
378: { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" },
379: { BGE_ASICREV_BCM5752, "unknown BCM5752" },
380: { BGE_ASICREV_BCM5780, "unknown BCM5780" },
381: { BGE_ASICREV_BCM5714, "unknown BCM5714" },
382: { BGE_ASICREV_BCM5755, "unknown BCM5755" },
383: /* 5754 and 5787 share the same ASIC ID */
384: { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" },
385: { BGE_ASICREV_BCM5906, "unknown BCM5906" },
386:
387: { 0, NULL }
388: };
389:
390: u_int32_t
391: bge_readmem_ind(struct bge_softc *sc, int off)
392: {
393: struct pci_attach_args *pa = &(sc->bge_pa);
394:
395: pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
396: return (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA));
397: }
398:
399: void
400: bge_writemem_ind(struct bge_softc *sc, int off, int val)
401: {
402: struct pci_attach_args *pa = &(sc->bge_pa);
403:
404: pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
405: pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
406: }
407:
408: void
409: bge_writereg_ind(struct bge_softc *sc, int off, int val)
410: {
411: struct pci_attach_args *pa = &(sc->bge_pa);
412:
413: pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
414: pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
415: }
416:
417: /*
418: * Read a byte of data stored in the EEPROM at address 'addr.' The
419: * BCM570x supports both the traditional bitbang interface and an
420: * auto access interface for reading the EEPROM. We use the auto
421: * access method.
422: */
423: u_int8_t
424: bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
425: {
426: int i;
427: u_int32_t byte = 0;
428:
429: /*
430: * Enable use of auto EEPROM access so we can avoid
431: * having to use the bitbang method.
432: */
433: BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
434:
435: /* Reset the EEPROM, load the clock period. */
436: CSR_WRITE_4(sc, BGE_EE_ADDR,
437: BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
438: DELAY(20);
439:
440: /* Issue the read EEPROM command. */
441: CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
442:
443: /* Wait for completion */
444: for (i = 0; i < BGE_TIMEOUT * 10; i++) {
445: DELAY(10);
446: if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
447: break;
448: }
449:
450: if (i == BGE_TIMEOUT * 10) {
451: printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
452: return (1);
453: }
454:
455: /* Get result. */
456: byte = CSR_READ_4(sc, BGE_EE_DATA);
457:
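	/* EE_DATA holds a 32-bit word; shift out the byte selected by the low two address bits. */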
458: *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
459:
460: return (0);
461: }
462:
463: /*
464: * Read a sequence of bytes from the EEPROM.
465: */
466: int
467: bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
468: {
469: int err = 0, i;
470: u_int8_t byte = 0;
471:
472: for (i = 0; i < cnt; i++) {
473: err = bge_eeprom_getbyte(sc, off + i, &byte);
474: if (err)
475: break;
476: *(dest + i) = byte;
477: }
478:
479: return (err ? 1 : 0);
480: }
481:
482: int
483: bge_miibus_readreg(struct device *dev, int phy, int reg)
484: {
485: struct bge_softc *sc = (struct bge_softc *)dev;
486: u_int32_t val, autopoll;
487: int i;
488:
489: /*
490: * Broadcom's own driver always assumes the internal
491: * PHY is at GMII address 1. On some chips, the PHY responds
492: * to accesses at all addresses, which could cause us to
493: * bogusly attach the PHY 32 times at probe time. Always
494: * restricting the lookup to address 1 is simpler than
495: * trying to figure out which chip revisions should be
496: * special-cased.
497: */
498: if (phy != 1)
499: return (0);
500:
501: /* Reading with autopolling on may trigger PCI errors */
502: autopoll = CSR_READ_4(sc, BGE_MI_MODE);
503: if (autopoll & BGE_MIMODE_AUTOPOLL) {
504: BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
505: DELAY(40);
506: }
507:
508: CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
509: BGE_MIPHY(phy)|BGE_MIREG(reg));
510:
511: for (i = 0; i < 200; i++) {
512: delay(1);
513: val = CSR_READ_4(sc, BGE_MI_COMM);
514: if (!(val & BGE_MICOMM_BUSY))
515: break;
516: delay(10);
517: }
518:
519: if (i == 200) {
520: printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
521: val = 0;
522: goto done;
523: }
524:
525: val = CSR_READ_4(sc, BGE_MI_COMM);
526:
527: done:
528: if (autopoll & BGE_MIMODE_AUTOPOLL) {
529: BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
530: DELAY(40);
531: }
532:
533: if (val & BGE_MICOMM_READFAIL)
534: return (0);
535:
536: return (val & 0xFFFF);
537: }
538:
539: void
540: bge_miibus_writereg(struct device *dev, int phy, int reg, int val)
541: {
542: struct bge_softc *sc = (struct bge_softc *)dev;
543: u_int32_t autopoll;
544: int i;
545:
546: /* Writing with autopolling on may trigger PCI errors */
547: autopoll = CSR_READ_4(sc, BGE_MI_MODE);
548: if (autopoll & BGE_MIMODE_AUTOPOLL) {
549: DELAY(40);
550: BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
551: DELAY(10); /* 40 usec is supposed to be adequate */
552: }
553:
554: CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
555: BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
556:
557: for (i = 0; i < 200; i++) {
558: delay(1);
559: if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
560: break;
561: delay(10);
562: }
563:
564: if (autopoll & BGE_MIMODE_AUTOPOLL) {
565: BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
566: DELAY(40);
567: }
568:
569: if (i == 200) {
570: printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
571: }
572: }
573:
574: void
575: bge_miibus_statchg(struct device *dev)
576: {
577: struct bge_softc *sc = (struct bge_softc *)dev;
578: struct mii_data *mii = &sc->bge_mii;
579:
580: /*
581: * Get flow control negotiation result.
582: */
583: if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
584: (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
585: sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
586: mii->mii_media_active &= ~IFM_ETH_FMASK;
587: }
588:
589: BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
590: if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
591: BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
592: else
593: BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
594:
595: if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
596: BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
597: else
598: BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
599:
600: /*
601: * 802.3x flow control
602: */
603: if (sc->bge_flowflags & IFM_ETH_RXPAUSE)
604: BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
605: else
606: BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
607:
608: if (sc->bge_flowflags & IFM_ETH_TXPAUSE)
609: BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
610: else
611: BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
612: }
613:
614: /*
615: * Memory management for Jumbo frames.
616: */
617:
618: int
619: bge_alloc_jumbo_mem(struct bge_softc *sc)
620: {
621: caddr_t ptr, kva;
622: bus_dma_segment_t seg;
623: int i, rseg, state, error;
624: struct bge_jpool_entry *entry;
625:
626: state = error = 0;
627:
628: /* Grab a big chunk o' storage. */
629: if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
630: &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
631: printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
632: return (ENOBUFS);
633: }
634:
635: state = 1;
636: if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva,
637: BUS_DMA_NOWAIT)) {
638: printf("%s: can't map dma buffers (%d bytes)\n",
639: sc->bge_dev.dv_xname, BGE_JMEM);
640: error = ENOBUFS;
641: goto out;
642: }
643:
644: state = 2;
645: if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
646: BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
647: printf("%s: can't create dma map\n", sc->bge_dev.dv_xname);
648: error = ENOBUFS;
649: goto out;
650: }
651:
652: state = 3;
653: if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
654: kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
655: printf("%s: can't load dma map\n", sc->bge_dev.dv_xname);
656: error = ENOBUFS;
657: goto out;
658: }
659:
660: state = 4;
661: sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva;
662: DPRINTFN(1,("bge_jumbo_buf = 0x%08X\n", sc->bge_cdata.bge_jumbo_buf));
663:
664: SLIST_INIT(&sc->bge_jfree_listhead);
665: SLIST_INIT(&sc->bge_jinuse_listhead);
666:
667: /*
668: * Now divide it up into 9K pieces and save the addresses
669: * in an array.
670: */
671: ptr = sc->bge_cdata.bge_jumbo_buf;
672: for (i = 0; i < BGE_JSLOTS; i++) {
673: sc->bge_cdata.bge_jslots[i] = ptr;
674: ptr += BGE_JLEN;
675: entry = malloc(sizeof(struct bge_jpool_entry),
676: M_DEVBUF, M_NOWAIT);
677: if (entry == NULL) {
678: printf("%s: no memory for jumbo buffer queue!\n",
679: sc->bge_dev.dv_xname);
680: error = ENOBUFS;
681: goto out;
682: }
683: entry->slot = i;
684: SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
685: entry, jpool_entries);
686: }
687: out:
688: if (error != 0) {
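		/* Unwind only the steps that completed; the switch cases fall through intentionally. */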
689: switch (state) {
690: case 4:
691: bus_dmamap_unload(sc->bge_dmatag,
692: sc->bge_cdata.bge_rx_jumbo_map);
693: case 3:
694: bus_dmamap_destroy(sc->bge_dmatag,
695: sc->bge_cdata.bge_rx_jumbo_map);
696: case 2:
697: bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
698: case 1:
699: bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
700: break;
701: default:
702: break;
703: }
704: }
705:
706: return (error);
707: }
708:
709: /*
710: * Allocate a Jumbo buffer.
711: */
712: void *
713: bge_jalloc(struct bge_softc *sc)
714: {
715: struct bge_jpool_entry *entry;
716:
717: entry = SLIST_FIRST(&sc->bge_jfree_listhead);
718:
719: if (entry == NULL)
720: return (NULL);
721:
722: SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
723: SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
724: return (sc->bge_cdata.bge_jslots[entry->slot]);
725: }
726:
727: /*
728: * Release a Jumbo buffer.
729: */
730: void
731: bge_jfree(caddr_t buf, u_int size, void *arg)
732: {
733: struct bge_jpool_entry *entry;
734: struct bge_softc *sc;
735: int i;
736:
737: /* Extract the softc struct pointer. */
738: sc = (struct bge_softc *)arg;
739:
740: if (sc == NULL)
741: panic("bge_jfree: can't find softc pointer!");
742:
743: /* calculate the slot this buffer belongs to */
744:
745: i = ((vaddr_t)buf
746: - (vaddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
747:
748: if ((i < 0) || (i >= BGE_JSLOTS))
749: panic("bge_jfree: asked to free buffer that we don't manage!");
750:
751: entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
752: if (entry == NULL)
753: panic("bge_jfree: buffer not in use!");
754: entry->slot = i;
755: SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
756: SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
757: }
758:
759:
760: /*
761: * Initialize a standard receive ring descriptor.
762: */
763: int
764: bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m,
765: bus_dmamap_t dmamap)
766: {
767: struct mbuf *m_new = NULL;
768: struct bge_rx_bd *r;
769: int error;
770:
771: if (dmamap == NULL) {
772: error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
773: MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
774: if (error != 0)
775: return (error);
776: }
777:
778: sc->bge_cdata.bge_rx_std_map[i] = dmamap;
779:
780: if (m == NULL) {
781: MGETHDR(m_new, M_DONTWAIT, MT_DATA);
782: if (m_new == NULL)
783: return (ENOBUFS);
784:
785: MCLGET(m_new, M_DONTWAIT);
786: if (!(m_new->m_flags & M_EXT)) {
787: m_freem(m_new);
788: return (ENOBUFS);
789: }
790: m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
791: } else {
792: /*
793: * We're re-using a previously allocated mbuf;
794: * be sure to re-init pointers and lengths to
795: * default values.
796: */
797: m_new = m;
798: m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
799: m_new->m_data = m_new->m_ext.ext_buf;
800: }
801:
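	/* On chips without the RX alignment bug, offset the payload so the IP header is longword aligned. */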
802: if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
803: m_adj(m_new, ETHER_ALIGN);
804:
805: error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
806: BUS_DMA_READ|BUS_DMA_NOWAIT);
807: if (error) {
808: if (m == NULL) {
809: m_freem(m_new);
810: sc->bge_cdata.bge_rx_std_chain[i] = NULL;
811: }
812: return (ENOBUFS);
813: }
814:
815: sc->bge_cdata.bge_rx_std_chain[i] = m_new;
816: r = &sc->bge_rdata->bge_rx_std_ring[i];
817: BGE_HOSTADDR(r->bge_addr, dmamap->dm_segs[0].ds_addr);
818: r->bge_flags = BGE_RXBDFLAG_END;
819: r->bge_len = m_new->m_len;
820: r->bge_idx = i;
821:
822: bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
823: offsetof(struct bge_ring_data, bge_rx_std_ring) +
824: i * sizeof (struct bge_rx_bd),
825: sizeof (struct bge_rx_bd),
826: BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
827:
828: return (0);
829: }
830:
831: /*
832: * Initialize a Jumbo receive ring descriptor. This allocates
833: * a Jumbo buffer from the pool managed internally by the driver.
834: */
835: int
836: bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
837: {
838: struct mbuf *m_new = NULL;
839: struct bge_rx_bd *r;
840:
841: if (m == NULL) {
842: caddr_t buf = NULL;
843:
844: /* Allocate the mbuf. */
845: MGETHDR(m_new, M_DONTWAIT, MT_DATA);
846: if (m_new == NULL)
847: return (ENOBUFS);
848:
849: /* Allocate the Jumbo buffer */
850: buf = bge_jalloc(sc);
851: if (buf == NULL) {
852: m_freem(m_new);
853: return (ENOBUFS);
854: }
855:
856: /* Attach the buffer to the mbuf. */
857: m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
858: MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, 0, bge_jfree, sc);
859: } else {
860: /*
861: * We're re-using a previously allocated mbuf;
862: * be sure to re-init pointers and lengths to
863: * default values.
864: */
865: m_new = m;
866: m_new->m_data = m_new->m_ext.ext_buf;
867: m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
868: }
869:
870: if (!(sc->bge_flags & BGE_RX_ALIGNBUG))
871: m_adj(m_new, ETHER_ALIGN);
872: /* Set up the descriptor. */
873: r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
874: sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
875: BGE_HOSTADDR(r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
876: r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
877: r->bge_len = m_new->m_len;
878: r->bge_idx = i;
879:
880: bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
881: offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
882: i * sizeof (struct bge_rx_bd),
883: sizeof (struct bge_rx_bd),
884: BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
885:
886: return (0);
887: }
888:
889: /*
890: * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
891: * that's 1MB of memory, which is a lot. For now, we fill only the first
892: * 256 ring entries and hope that our CPU is fast enough to keep up with
893: * the NIC.
894: */
895: int
896: bge_init_rx_ring_std(struct bge_softc *sc)
897: {
898: int i;
899:
900: if (sc->bge_flags & BGE_RXRING_VALID)
901: return (0);
902:
903: for (i = 0; i < BGE_SSLOTS; i++) {
904: if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
905: return (ENOBUFS);
906: }
907:
908: sc->bge_std = i - 1;
909: CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
910:
911: sc->bge_flags |= BGE_RXRING_VALID;
912:
913: return (0);
914: }
915:
916: void
917: bge_free_rx_ring_std(struct bge_softc *sc)
918: {
919: int i;
920:
921: if (!(sc->bge_flags & BGE_RXRING_VALID))
922: return;
923:
924: for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
925: if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
926: m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
927: sc->bge_cdata.bge_rx_std_chain[i] = NULL;
928: bus_dmamap_destroy(sc->bge_dmatag,
929: sc->bge_cdata.bge_rx_std_map[i]);
930: }
931: bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i],
932: sizeof(struct bge_rx_bd));
933: }
934:
935: sc->bge_flags &= ~BGE_RXRING_VALID;
936: }
937:
938: int
939: bge_init_rx_ring_jumbo(struct bge_softc *sc)
940: {
941: int i;
942: volatile struct bge_rcb *rcb;
943:
944: if (sc->bge_flags & BGE_JUMBO_RXRING_VALID)
945: return (0);
946:
947: for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
948: if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
949: return (ENOBUFS);
950: }
951:
952: sc->bge_jumbo = i - 1;
953: sc->bge_flags |= BGE_JUMBO_RXRING_VALID;
954:
955: rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
956: rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
957: CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
958:
959: CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
960:
961: return (0);
962: }
963:
964: void
965: bge_free_rx_ring_jumbo(struct bge_softc *sc)
966: {
967: int i;
968:
969: if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
970: return;
971:
972: for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
973: if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
974: m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
975: sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
976: }
977: bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i],
978: sizeof(struct bge_rx_bd));
979: }
980:
981: sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
982: }
983:
984: void
985: bge_free_tx_ring(struct bge_softc *sc)
986: {
987: int i;
988: struct txdmamap_pool_entry *dma;
989:
990: if (!(sc->bge_flags & BGE_TXRING_VALID))
991: return;
992:
993: for (i = 0; i < BGE_TX_RING_CNT; i++) {
994: if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
995: m_freem(sc->bge_cdata.bge_tx_chain[i]);
996: sc->bge_cdata.bge_tx_chain[i] = NULL;
997: SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
998: link);
999: sc->txdma[i] = 0;
1000: }
1001: bzero((char *)&sc->bge_rdata->bge_tx_ring[i],
1002: sizeof(struct bge_tx_bd));
1003: }
1004:
1005: while ((dma = SLIST_FIRST(&sc->txdma_list))) {
1006: SLIST_REMOVE_HEAD(&sc->txdma_list, link);
1007: bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
1008: free(dma, M_DEVBUF);
1009: }
1010:
1011: sc->bge_flags &= ~BGE_TXRING_VALID;
1012: }
1013:
1014: int
1015: bge_init_tx_ring(struct bge_softc *sc)
1016: {
1017: int i;
1018: bus_dmamap_t dmamap;
1019: struct txdmamap_pool_entry *dma;
1020:
1021: if (sc->bge_flags & BGE_TXRING_VALID)
1022: return (0);
1023:
1024: sc->bge_txcnt = 0;
1025: sc->bge_tx_saved_considx = 0;
1026:
1027: /* Initialize transmit producer index for host-memory send ring. */
1028: sc->bge_tx_prodidx = 0;
1029: CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
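	/* 5700 BX errata: mailbox register writes must be issued twice. */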
1030: if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1031: CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1032:
1033: /* NIC-memory send ring not used; initialize to zero. */
1034: CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1035: if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
1036: CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1037:
1038: SLIST_INIT(&sc->txdma_list);
1039: for (i = 0; i < BGE_TX_RING_CNT; i++) {
1040: if (bus_dmamap_create(sc->bge_dmatag, BGE_JLEN,
1041: BGE_NTXSEG, BGE_JLEN, 0, BUS_DMA_NOWAIT,
1042: &dmamap))
1043: return (ENOBUFS);
1044: if (dmamap == NULL)
1045: panic("dmamap NULL in bge_init_tx_ring");
1046: dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
1047: if (dma == NULL) {
1048: printf("%s: can't alloc txdmamap_pool_entry\n",
1049: sc->bge_dev.dv_xname);
1050: bus_dmamap_destroy(sc->bge_dmatag, dmamap);
1051: return (ENOMEM);
1052: }
1053: dma->dmamap = dmamap;
1054: SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
1055: }
1056:
1057: sc->bge_flags |= BGE_TXRING_VALID;
1058:
1059: return (0);
1060: }
1061:
1062: void
1063: bge_iff(struct bge_softc *sc)
1064: {
1065: struct arpcom *ac = &sc->arpcom;
1066: struct ifnet *ifp = &ac->ac_if;
1067: struct ether_multi *enm;
1068: struct ether_multistep step;
1069: u_int8_t hashes[16];
1070: u_int32_t h, rxmode;
1071:
1072: /* First, zot all the existing filters. */
1073: rxmode = CSR_READ_4(sc, BGE_RX_MODE) & ~BGE_RXMODE_RX_PROMISC;
1074: ifp->if_flags &= ~IFF_ALLMULTI;
1075: memset(hashes, 0x00, sizeof(hashes));
1076:
1077: if (ifp->if_flags & IFF_PROMISC)
1078: rxmode |= BGE_RXMODE_RX_PROMISC;
1079: else if (ac->ac_multirangecnt > 0) {
1080: ifp->if_flags |= IFF_ALLMULTI;
1081: memset(hashes, 0xff, sizeof(hashes));
1082: } else {
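		/* Build the 128-bit hash filter: the low 7 bits of each multicast CRC select a bit. */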
1083: ETHER_FIRST_MULTI(step, ac, enm);
1084: while (enm != NULL) {
1085: h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1086: setbit(hashes, h & 0x7F);
1087: ETHER_NEXT_MULTI(step, enm);
1088: }
1089: }
1090:
1091: bus_space_write_raw_region_4(sc->bge_btag, sc->bge_bhandle, BGE_MAR0,
1092: hashes, sizeof(hashes));
1093:
1094: CSR_WRITE_4(sc, BGE_RX_MODE, rxmode);
1095: }
1096:
1097: /*
1098: * Do endian, PCI and DMA initialization.
1099: */
1100: void
1101: bge_chipinit(struct bge_softc *sc)
1102: {
1103: struct pci_attach_args *pa = &(sc->bge_pa);
1104: u_int32_t dma_rw_ctl;
1105: int i;
1106:
1107: /* Set endianness before we access any non-PCI registers. */
1108: pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
1109: BGE_INIT);
1110:
1111: /* Clear the MAC control register */
1112: CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1113:
1114: /*
1115: * Clear the MAC statistics block in the NIC's
1116: * internal memory.
1117: */
1118: for (i = BGE_STATS_BLOCK;
1119: i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1120: BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1121:
1122: for (i = BGE_STATUS_BLOCK;
1123: i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1124: BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1125:
1126: /* Set up the PCI DMA control register. */
1127: if (sc->bge_flags & BGE_PCIE) {
1128: /* PCI Express bus */
1129: u_int32_t device_ctl;
1130:
1131: /* alternative from Linux driver */
1132: #define DMA_CTRL_WRITE_PCIE_H20MARK_128 0x00180000
1133: #define DMA_CTRL_WRITE_PCIE_H20MARK_256 0x00380000
1134:
1135: dma_rw_ctl = 0x76000000; /* XXX XXX XXX */
1136: device_ctl = pci_conf_read(pa->pa_pc, pa->pa_tag,
1137: BGE_PCI_CONF_DEV_CTRL);
1138:
1139: if ((device_ctl & 0x00e0) && 0) {
1140: /*
1141: * This clause is exactly what the Broadcom-supplied
1142: * Linux does; but given overall register programming
1143: * by bge(4), this larger DMA-write watermark
1144: * value causes BCM5721 chips to totally wedge.
1145: */
1146: dma_rw_ctl |= BGE_PCIDMA_RWCTL_PCIE_WRITE_WATRMARK_256;
1147: } else {
1148: dma_rw_ctl |= BGE_PCIDMA_RWCTL_PCIE_WRITE_WATRMARK_128;
1149: }
1150: } else if (sc->bge_flags & BGE_PCIX) {
1151: /* PCI-X bus */
1152: if (BGE_IS_5714_FAMILY(sc)) {
1153: dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
1154: dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
1155: /* XXX magic values, Broadcom-supplied Linux driver */
1156: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
1157: dma_rw_ctl |= (1 << 20) | (1 << 18) |
1158: BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1159: else
1160: dma_rw_ctl |= (1<<20) | (1<<18) | (1 << 15);
1161: } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1162: /*
1163: * The 5704 uses a different encoding of read/write
1164: * watermarks.
1165: */
1166: dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1167: (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1168: (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1169: else
1170: dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1171: (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1172: (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1173: (0x0F);
1174:
1175: /*
1176: * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1177: * for hardware bugs.
1178: */
1179: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1180: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1181: u_int32_t tmp;
1182:
1183: tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1184: if (tmp == 0x6 || tmp == 0x7)
1185: dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1186: }
1187: } else {
1188: /* Conventional PCI bus */
1189: dma_rw_ctl = BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
1190: (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1191: (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1192: (0x0f);
1193: }
1194:
1195: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 ||
1196: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704 ||
1197: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705)
1198: dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1199:
1200: pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);
1201:
1202: /*
1203: * Set up general mode register.
1204: */
1205: #ifndef BGE_CHECKSUM
1206: CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1207: BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1208: BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
1209: #else
1210: CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1211: BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS);
1212: #endif
1213:
1214: /*
1215: * Disable memory write invalidate. Apparently it is not supported
1216: * properly by these devices.
1217: */
1218: PCI_CLRBIT(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
1219: PCI_COMMAND_INVALIDATE_ENABLE);
1220:
1221: #ifdef __brokenalpha__
1222: /*
1223: * Must ensure that we do not cross an 8K (bytes) boundary
1224: * for DMA reads. Our highest limit is 1K bytes. This is a
1225: * restriction on some ALPHA platforms with early revision
1226: * 21174 PCI chipsets, such as the AlphaPC 164lx
1227: */
1228: PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1229: BGE_PCI_READ_BNDRY_1024);
1230: #endif
1231:
1232: /* Set the timer prescaler (always 66MHz) */
1233: CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1234: }
1235:
1236: int
1237: bge_blockinit(struct bge_softc *sc)
1238: {
1239: volatile struct bge_rcb *rcb;
1240: vaddr_t rcb_addr;
1241: int i;
1242: bge_hostaddr taddr;
1243: u_int32_t val;
1244:
1245: /*
1246: * Initialize the memory window pointer register so that
1247: * we can access the first 32K of internal NIC RAM. This will
1248: * allow us to set up the TX send ring RCBs and the RX return
1249: * ring RCBs, plus other things which live in NIC memory.
1250: */
1251: CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1252:
1253: /* Configure mbuf memory pool */
1254: if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1255: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1256: BGE_BUFFPOOL_1);
1257:
1258: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
1259: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1260: else
1261: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1262:
1263: /* Configure DMA resource pool */
1264: CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1265: BGE_DMA_DESCRIPTORS);
1266: CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1267: }
1268:
1269: /* Configure mbuf pool watermarks */
1270: /* new Broadcom docs strongly recommend these: */
1271: if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1272: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1273: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1274: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1275: } else {
1276: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1277: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1278: CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1279: }
1280:
1281: /* Configure DMA resource watermarks */
1282: CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1283: CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1284:
1285: /* Enable buffer manager */
1286: CSR_WRITE_4(sc, BGE_BMAN_MODE,
1287: BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1288:
1289: /* Poll for buffer manager start indication */
1290: for (i = 0; i < 2000; i++) {
1291: if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1292: break;
1293: DELAY(10);
1294: }
1295:
1296: if (i == 2000) {
1297: printf("%s: buffer manager failed to start\n",
1298: sc->bge_dev.dv_xname);
1299: return (ENXIO);
1300: }
1301:
1302: /* Enable flow-through queues */
1303: CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1304: CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1305:
1306: /* Wait until queue initialization is complete */
1307: for (i = 0; i < 2000; i++) {
1308: if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1309: break;
1310: DELAY(10);
1311: }
1312:
1313: if (i == 2000) {
1314: printf("%s: flow-through queue init failed\n",
1315: sc->bge_dev.dv_xname);
1316: return (ENXIO);
1317: }
1318:
1319: /* Initialize the standard RX ring control block */
1320: rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1321: BGE_HOSTADDR(rcb->bge_hostaddr, BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
1322: if (BGE_IS_5705_OR_BEYOND(sc))
1323: rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1324: else
1325: rcb->bge_maxlen_flags =
1326: BGE_RCB_MAXLEN_FLAGS(ETHER_MAX_DIX_LEN, 0);
1327: rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1328: CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1329: CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1330: CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1331: CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1332:
1333: /*
1334: * Initialize the Jumbo RX ring control block.
1335: * We set the 'ring disabled' bit in the flags
1336: * field until we're actually ready to start
1337: * using this ring (i.e. once we set the MTU
1338: * high enough to require it).
1339: */
1340: if (BGE_IS_JUMBO_CAPABLE(sc)) {
1341: rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1342: BGE_HOSTADDR(rcb->bge_hostaddr,
1343: BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
1344: rcb->bge_maxlen_flags =
1345: BGE_RCB_MAXLEN_FLAGS(BGE_JUMBO_FRAMELEN,
1346: BGE_RCB_FLAG_RING_DISABLED);
1347: rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1348:
1349: CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1350: rcb->bge_hostaddr.bge_addr_hi);
1351: CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1352: rcb->bge_hostaddr.bge_addr_lo);
1353: CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1354: rcb->bge_maxlen_flags);
1355: CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR,
1356: rcb->bge_nicaddr);
1357:
1358: /* Set up dummy disabled mini ring RCB */
1359: rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
1360: rcb->bge_maxlen_flags =
1361: BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1362: CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1363: rcb->bge_maxlen_flags);
1364:
1365: bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1366: offsetof(struct bge_ring_data, bge_info),
1367: sizeof (struct bge_gib),
1368: BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1369: }
1370:
1371: /*
1372: * Set the BD ring replenish thresholds. The recommended
1373: * values are 1/8th the number of descriptors allocated to
1374: * each ring.
1375: */
1376: i = BGE_STD_RX_RING_CNT / 8;
1377:
1378: /*
1379: * Use a value of 8 for the following chips to work around HW errata.
1380: * Some of these chips have been added based on empirical
1381: * evidence (they don't work unless this is done).
1382: */
1383: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 ||
1384: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
1385: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
1386: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787)
1387: i = 8;
1388:
1389: CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, i);
1390: CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT / 8);
1391:
1392: /*
1393: * Disable all unused send rings by setting the 'ring disabled'
1394: * bit in the flags field of all the TX send ring control blocks.
1395: * These are located in NIC memory.
1396: */
1397: rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1398: for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1399: RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1400: BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
1401: RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1402: rcb_addr += sizeof(struct bge_rcb);
1403: }
1404:
1405: /* Configure TX RCB 0 (we use only the first ring) */
1406: rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1407: BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
1408: RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1409: RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1410: RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
1411: BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1412: if (!(BGE_IS_5705_OR_BEYOND(sc)))
1413: RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1414: BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1415:
1416: /* Disable all unused RX return rings */
1417: rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1418: for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1419: RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
1420: RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
1421: RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1422: BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1423: BGE_RCB_FLAG_RING_DISABLED));
1424: RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1425: CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1426: (i * (sizeof(u_int64_t))), 0);
1427: rcb_addr += sizeof(struct bge_rcb);
1428: }
1429:
1430: /* Initialize RX ring indexes */
1431: CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1432: CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1433: CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1434:
1435: /*
1436: * Set up RX return ring 0.
1437: * Note that the NIC address for RX return rings is 0x00000000.
1438: * The return rings live entirely within the host, so the
1439: * nicaddr field in the RCB isn't used.
1440: */
1441: rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1442: BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
1443: RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1444: RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1445: RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
1446: RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1447: BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1448:
1449: /* Set random backoff seed for TX */
1450: CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1451: sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1452: sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1453: sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1454: BGE_TX_BACKOFF_SEED_MASK);
1455:
1456: /* Set inter-packet gap */
1457: CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1458:
1459: /*
1460: * Specify which ring to use for packets that don't match
1461: * any RX rules.
1462: */
1463: CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1464:
1465: /*
1466: * Configure number of RX lists. One interrupt distribution
1467: * list, sixteen active lists, one bad frames class.
1468: */
1469: CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1470:
1471: /* Initialize RX list placement stats mask. */
1472: CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1473: CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1474:
1475: /* Disable host coalescing until we get it set up */
1476: CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1477:
1478: /* Poll to make sure it's shut down. */
1479: for (i = 0; i < 2000; i++) {
1480: if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1481: break;
1482: DELAY(10);
1483: }
1484:
1485: if (i == 2000) {
1486: printf("%s: host coalescing engine failed to idle\n",
1487: sc->bge_dev.dv_xname);
1488: return (ENXIO);
1489: }
1490:
1491: /* Set up host coalescing defaults */
1492: CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1493: CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1494: CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1495: CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1496: if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1497: CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1498: CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1499: }
1500: CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1501: CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1502:
1503: /* Set up address of statistics block */
1504: if (!(BGE_IS_5705_OR_BEYOND(sc))) {
1505: CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
1506: CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1507: BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
1508:
1509: CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1510: CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1511: CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1512: }
1513:
1514: /* Set up address of status block */
1515: BGE_HOSTADDR(taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
1516: CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
1517: CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
1518:
1519: sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
1520: sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
1521:
1522: /* Turn on host coalescing state machine */
1523: CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1524:
1525: /* Turn on RX BD completion state machine and enable attentions */
1526: CSR_WRITE_4(sc, BGE_RBDC_MODE,
1527: BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1528:
1529: /* Turn on RX list placement state machine */
1530: CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1531:
1532: /* Turn on RX list selector state machine. */
1533: if (!(BGE_IS_5705_OR_BEYOND(sc)))
1534: CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1535:
1536: /* Turn on DMA, clear stats */
1537: CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1538: BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1539: BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1540: BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1541: (sc->bge_flags & BGE_TBI ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1542:
1543: /* Set misc. local control, enable interrupts on attentions */
1544: CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1545:
1546: #ifdef notdef
1547: /* Assert GPIO pins for PHY reset */
1548: BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1549: BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1550: BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1551: BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1552: #endif
1553:
1554: /* Turn on DMA completion state machine */
1555: if (!(BGE_IS_5705_OR_BEYOND(sc)))
1556: CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1557:
1558: val = BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1559:
1560: /* Enable host coalescing bug fix. */
1561: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
1562: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787)
1563: val |= (1 << 29);
1564:
1565: /* Turn on write DMA state machine */
1566: CSR_WRITE_4(sc, BGE_WDMA_MODE, val);
1567:
1568: /* Turn on read DMA state machine */
1569: {
1570: uint32_t dma_read_modebits;
1571:
1572: dma_read_modebits =
1573: BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1574:
1575: if (sc->bge_flags & BGE_PCIE && 0)
1576: dma_read_modebits |= BGE_RDMA_MODE_FIFO_LONG_BURST;
1577:
1578: CSR_WRITE_4(sc, BGE_RDMA_MODE, dma_read_modebits);
1579: }
1580:
1581: /* Turn on RX data completion state machine */
1582: CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1583:
1584: /* Turn on RX BD initiator state machine */
1585: CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1586:
1587: /* Turn on RX data and RX BD initiator state machine */
1588: CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1589:
1590: /* Turn on Mbuf cluster free state machine */
1591: if (!(BGE_IS_5705_OR_BEYOND(sc)))
1592: CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1593:
1594: /* Turn on send BD completion state machine */
1595: CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1596:
1597: /* Turn on send data completion state machine */
1598: CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1599:
1600: /* Turn on send data initiator state machine */
1601: CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1602:
1603: /* Turn on send BD initiator state machine */
1604: CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1605:
1606: /* Turn on send BD selector state machine */
1607: CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1608:
1609: CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1610: CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1611: BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1612:
1613: /* ack/clear link change events */
1614: CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1615: BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1616: BGE_MACSTAT_LINK_CHANGED);
1617:
1618: /* Enable PHY auto polling (for MII/GMII only) */
1619: if (sc->bge_flags & BGE_TBI) {
1620: CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1621: } else {
1622: BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1623: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 &&
1624: sc->bge_chipid != BGE_CHIPID_BCM5700_B2)
1625: CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1626: BGE_EVTENB_MI_INTERRUPT);
1627: }
1628:
1629: /*
1630: * Clear any pending link state attention.
1631: * Otherwise some link state change events may be lost until attention
1632: * is cleared by bge_intr() -> bge_link_upd() sequence.
1633: * It's not necessary on newer BCM chips - perhaps enabling link
1634: * state change attentions implies clearing pending attention.
1635: */
1636: CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1637: BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1638: BGE_MACSTAT_LINK_CHANGED);
1639:
1640: /* Enable link state change attentions. */
1641: BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1642:
1643: return (0);
1644: }
1645:
1646: const struct bge_revision *
1647: bge_lookup_rev(u_int32_t chipid)
1648: {
1649: const struct bge_revision *br;
1650:
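	/*
	 * Look for an exact chip id match first; failing that, fall back
	 * to a match on the ASIC major revision alone.
	 */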
1651: for (br = bge_revisions; br->br_name != NULL; br++) {
1652: if (br->br_chipid == chipid)
1653: return (br);
1654: }
1655:
1656: for (br = bge_majorrevs; br->br_name != NULL; br++) {
1657: if (br->br_chipid == BGE_ASICREV(chipid))
1658: return (br);
1659: }
1660:
1661: return (NULL);
1662: }
1663:
1664: /*
1665: * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1666: * against our list and return its name if we find a match. Note
1667: * that since the Broadcom controller contains VPD support, we
1668: * can get the device name string from the controller itself instead
1669: * of the compiled-in string. This is a little slow, but it guarantees
1670: * we'll always announce the right product name.
1671: */
1672: int
1673: bge_probe(struct device *parent, void *match, void *aux)
1674: {
1675: return (pci_matchbyid((struct pci_attach_args *)aux, bge_devices,
1676: sizeof(bge_devices)/sizeof(bge_devices[0])));
1677: }
1678:
1679: void
1680: bge_attach(struct device *parent, struct device *self, void *aux)
1681: {
1682: struct bge_softc *sc = (struct bge_softc *)self;
1683: struct pci_attach_args *pa = aux;
1684: pci_chipset_tag_t pc = pa->pa_pc;
1685: const struct bge_revision *br;
1686: pcireg_t pm_ctl, memtype, subid;
1687: pci_intr_handle_t ih;
1688: const char *intrstr = NULL;
1689: bus_size_t size;
1690: bus_dma_segment_t seg;
1691: int rseg, gotenaddr = 0;
1692: u_int32_t hwcfg = 0;
1693: u_int32_t mac_addr = 0;
1694: u_int32_t misccfg;
1695: struct ifnet *ifp;
1696: caddr_t kva;
1697: #ifdef __sparc64__
1698: int subvendor;
1699: #endif
1700:
1701: sc->bge_pa = *pa;
1702:
1703: subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1704:
1705: /*
1706: * Map control/status registers.
1707: */
1708: DPRINTFN(5, ("Map control/status regs\n"));
1709:
1710: DPRINTFN(5, ("pci_mapreg_map\n"));
1711: memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
1712: switch (memtype) {
1713: case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1714: case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1715: if (pci_mapreg_map(pa, BGE_PCI_BAR0,
1716: memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
1717: NULL, &size, 0) == 0)
1718: break;
1719: default:
1720: printf(": can't find mem space\n");
1721: return;
1722: }
1723:
1724: DPRINTFN(5, ("pci_intr_map\n"));
1725: if (pci_intr_map(pa, &ih)) {
1726: printf(": couldn't map interrupt\n");
1727: goto fail_1;
1728: }
1729:
1730: DPRINTFN(5, ("pci_intr_string\n"));
1731: intrstr = pci_intr_string(pc, ih);
1732:
1733: /*
1734: * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
1735: * can clobber the chip's PCI config-space power control registers,
1736: * leaving the card in D3 powersave state.
1737: * We do not have memory-mapped registers in this state,
1738: * so force device into D0 state before starting initialization.
1739: */
1740: pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
1741: pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
1742: 	pm_ctl |= (1 << 8) | PCI_PWR_D0;	/* D0 state */
1743: pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
1744: 	DELAY(1000);	/* 27 usec is allegedly sufficient */
1745:
1746: /*
1747: * Save ASIC rev.
1748: */
1749:
1750: sc->bge_chipid =
1751: pci_conf_read(pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
1752: BGE_PCIMISCCTL_ASICREV;
1753:
1754: printf(", ");
1755: br = bge_lookup_rev(sc->bge_chipid);
1756: if (br == NULL)
1757: printf("unknown ASIC (0x%04x)", sc->bge_chipid >> 16);
1758: else
1759: printf("%s (0x%04x)", br->br_name, sc->bge_chipid >> 16);
1760:
1761: /*
1762: * PCI Express check.
1763: */
1764: if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
1765: NULL, NULL) != 0)
1766: sc->bge_flags |= BGE_PCIE;
1767:
1768: /*
1769: * PCI-X check.
1770: */
1771: if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
1772: BGE_PCISTATE_PCI_BUSMODE) == 0)
1773: sc->bge_flags |= BGE_PCIX;
1774:
1775: /*
1776: * SEEPROM check.
1777: */
1778: #ifdef __sparc64__
1779: if (OF_getprop(PCITAG_NODE(pa->pa_tag), "subsystem-vendor-id",
1780: &subvendor, sizeof(subvendor)) == sizeof(subvendor)) {
1781: if (subvendor == PCI_VENDOR_SUN)
1782: sc->bge_flags |= BGE_NO_EEPROM;
1783: }
1784: #endif
1785:
1786: /*
1787: * When using the BCM5701 in PCI-X mode, data corruption has
1788: * been observed in the first few bytes of some received packets.
1789: * Aligning the packet buffer in memory eliminates the corruption.
1790: * Unfortunately, this misaligns the packet payloads. On platforms
1791: * which do not support unaligned accesses, we will realign the
1792: * payloads by copying the received packets.
1793: */
1794: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701 &&
1795: sc->bge_flags & BGE_PCIX)
1796: sc->bge_flags |= BGE_RX_ALIGNBUG;
1797:
1798: if (BGE_IS_JUMBO_CAPABLE(sc))
1799: sc->bge_flags |= BGE_JUMBO_CAP;
1800:
1801: if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1802: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5701) &&
1803: PCI_VENDOR(subid) == DELL_VENDORID)
1804: sc->bge_flags |= BGE_NO_3LED;
1805:
1806: misccfg = CSR_READ_4(sc, BGE_MISC_CFG);
1807: misccfg &= BGE_MISCCFG_BOARD_ID_MASK;
1808:
1809: if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703 &&
1810: (misccfg == 0x4000 || misccfg == 0x8000)) ||
1811: (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
1812: PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
1813: (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901 ||
1814: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5901A2 ||
1815: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5705F)) ||
1816: (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
1817: (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5751F ||
1818: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5753F ||
1819: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5787F)) ||
1820: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
1821: sc->bge_flags |= BGE_10_100_ONLY;
1822:
1823: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 ||
1824: (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5705 &&
1825: (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 &&
1826: sc->bge_chipid != BGE_CHIPID_BCM5705_A1)) ||
1827: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
1828: sc->bge_flags |= BGE_NO_ETH_WIRE_SPEED;
1829:
1830: if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5703_AX ||
1831: BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5704_AX)
1832: sc->bge_flags |= BGE_PHY_ADC_BUG;
1833: if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0)
1834: sc->bge_flags |= BGE_PHY_5704_A0_BUG;
1835:
1836: if (BGE_IS_5705_OR_BEYOND(sc)) {
1837: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
1838: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787) {
1839: if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5722 &&
1840: PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BROADCOM_BCM5756)
1841: sc->bge_flags |= BGE_PHY_JITTER_BUG;
1842: if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_BCM5755M)
1843: sc->bge_flags |= BGE_PHY_ADJUST_TRIM;
1844: } else if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
1845: sc->bge_flags |= BGE_PHY_BER_BUG;
1846: }
1847:
1848: /* Try to reset the chip. */
1849: DPRINTFN(5, ("bge_reset\n"));
1850: bge_reset(sc);
1851:
1852: bge_chipinit(sc);
1853:
1854: #ifdef __sparc64__
1855: if (!gotenaddr) {
1856: if (OF_getprop(PCITAG_NODE(pa->pa_tag), "local-mac-address",
1857: sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
1858: gotenaddr = 1;
1859: }
1860: #endif
1861:
1862: /*
1863: * Get station address from the EEPROM.
1864: */
1865: if (!gotenaddr) {
1866: mac_addr = bge_readmem_ind(sc, 0x0c14);
1867: if ((mac_addr >> 16) == 0x484b) {
1868: sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
1869: sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
1870: mac_addr = bge_readmem_ind(sc, 0x0c18);
1871: sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
1872: sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
1873: sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
1874: sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
1875: gotenaddr = 1;
1876: }
1877: }
1878: if (!gotenaddr && (!(sc->bge_flags & BGE_NO_EEPROM))) {
1879: if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
1880: BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN) == 0)
1881: gotenaddr = 1;
1882: }
1883:
1884: #ifdef __sparc64__
1885: if (!gotenaddr) {
1886: extern void myetheraddr(u_char *);
1887:
1888: myetheraddr(sc->arpcom.ac_enaddr);
1889: gotenaddr = 1;
1890: }
1891: #endif
1892:
1893: if (!gotenaddr) {
1894: printf(": failed to read station address\n");
1895: goto fail_1;
1896: }
1897:
1898: /* Allocate the general information block and ring buffers. */
1899: sc->bge_dmatag = pa->pa_dmat;
1900: DPRINTFN(5, ("bus_dmamem_alloc\n"));
1901: if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
1902: PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
1903: printf(": can't alloc rx buffers\n");
1904: goto fail_1;
1905: }
1906: DPRINTFN(5, ("bus_dmamem_map\n"));
1907: if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
1908: sizeof(struct bge_ring_data), &kva,
1909: BUS_DMA_NOWAIT)) {
1910: 		printf(": can't map dma buffers (%lu bytes)\n",
1911: 		    (u_long)sizeof(struct bge_ring_data));
1912: goto fail_2;
1913: }
1914: DPRINTFN(5, ("bus_dmamem_create\n"));
1915: if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
1916: sizeof(struct bge_ring_data), 0,
1917: BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
1918: printf(": can't create dma map\n");
1919: goto fail_3;
1920: }
1921: DPRINTFN(5, ("bus_dmamem_load\n"));
1922: if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
1923: sizeof(struct bge_ring_data), NULL,
1924: BUS_DMA_NOWAIT)) {
1925: goto fail_4;
1926: }
1927:
1928: DPRINTFN(5, ("bzero\n"));
1929: sc->bge_rdata = (struct bge_ring_data *)kva;
1930:
1931: bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
1932:
1933: /*
1934: * Try to allocate memory for Jumbo buffers.
1935: */
1936: if (BGE_IS_JUMBO_CAPABLE(sc)) {
1937: if (bge_alloc_jumbo_mem(sc)) {
1938: printf(": jumbo buffer allocation failed\n");
1939: goto fail_5;
1940: }
1941: }
1942:
1943: /* Set default tuneable values. */
1944: sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
1945: sc->bge_rx_coal_ticks = 150;
1946: sc->bge_rx_max_coal_bds = 64;
1947: sc->bge_tx_coal_ticks = 300;
1948: sc->bge_tx_max_coal_bds = 400;
1949:
1950: /* 5705 limits RX return ring to 512 entries. */
1951: if (BGE_IS_5705_OR_BEYOND(sc))
1952: sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1953: else
1954: sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1955:
1956: /* Set up ifnet structure */
1957: ifp = &sc->arpcom.ac_if;
1958: ifp->if_softc = sc;
1959: ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1960: ifp->if_ioctl = bge_ioctl;
1961: ifp->if_start = bge_start;
1962: ifp->if_watchdog = bge_watchdog;
1963: ifp->if_baudrate = 1000000000;
1964: IFQ_SET_MAXLEN(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1965: IFQ_SET_READY(&ifp->if_snd);
1966: DPRINTFN(5, ("bcopy\n"));
1967: bcopy(sc->bge_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1968:
1969: ifp->if_capabilities = IFCAP_VLAN_MTU;
1970:
1971: #if NVLAN > 0
1972: ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1973: #endif
1974:
1975: if (BGE_IS_JUMBO_CAPABLE(sc))
1976: ifp->if_hardmtu = BGE_JUMBO_MTU;
1977:
1978: /*
1979: * Do MII setup.
1980: */
1981: DPRINTFN(5, ("mii setup\n"));
1982: sc->bge_mii.mii_ifp = ifp;
1983: sc->bge_mii.mii_readreg = bge_miibus_readreg;
1984: sc->bge_mii.mii_writereg = bge_miibus_writereg;
1985: sc->bge_mii.mii_statchg = bge_miibus_statchg;
1986:
1987: /*
1988: * Figure out what sort of media we have by checking the hardware
1989: * config word in the first 32K of internal NIC memory, or fall back to
1990: * examining the EEPROM if necessary. Note: on some BCM5700 cards,
1991: * this value seems to be unset. If that's the case, we have to rely on
1992: * identifying the NIC by its PCI subsystem ID, as we do below for the
1993: * SysKonnect SK-9D41.
1994: */
1995: if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
1996: hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1997: else if (!(sc->bge_flags & BGE_NO_EEPROM)) {
1998: if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
1999: sizeof(hwcfg))) {
2000: printf(": failed to read media type\n");
2001: goto fail_5;
2002: }
2003: hwcfg = ntohl(hwcfg);
2004: }
2005:
2006: if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2007: sc->bge_flags |= BGE_TBI;
2008:
2009: /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2010: if (PCI_PRODUCT(subid) == SK_SUBSYSID_9D41)
2011: sc->bge_flags |= BGE_TBI;
2012:
2013: /* Hookup IRQ last. */
2014: DPRINTFN(5, ("pci_intr_establish\n"));
2015: sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc,
2016: sc->bge_dev.dv_xname);
2017: if (sc->bge_intrhand == NULL) {
2018: printf(": couldn't establish interrupt");
2019: if (intrstr != NULL)
2020: printf(" at %s", intrstr);
2021: printf("\n");
2022: goto fail_5;
2023: }
2024:
2025: /*
2026: * A Broadcom chip was detected. Inform the world.
2027: */
2028: printf(": %s, address %s\n", intrstr,
2029: ether_sprintf(sc->arpcom.ac_enaddr));
2030:
2031: if (sc->bge_flags & BGE_TBI) {
2032: ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2033: bge_ifmedia_sts);
2034: ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2035: ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
2036: 0, NULL);
2037: ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2038: ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2039: sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media;
2040: } else {
2041: /*
2042: * Do transceiver setup.
2043: */
2044: ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
2045: bge_ifmedia_sts);
2046: mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
2047: MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE);
2048:
2049: if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
2050: printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
2051: ifmedia_add(&sc->bge_mii.mii_media,
2052: IFM_ETHER|IFM_MANUAL, 0, NULL);
2053: ifmedia_set(&sc->bge_mii.mii_media,
2054: IFM_ETHER|IFM_MANUAL);
2055: } else
2056: ifmedia_set(&sc->bge_mii.mii_media,
2057: IFM_ETHER|IFM_AUTO);
2058: }
2059:
2060: /*
2061: * Call MI attach routine.
2062: */
2063: if_attach(ifp);
2064: ether_ifattach(ifp);
2065:
2066: sc->sc_shutdownhook = shutdownhook_establish(bge_shutdown, sc);
2067: sc->sc_powerhook = powerhook_establish(bge_power, sc);
2068:
2069: timeout_set(&sc->bge_timeout, bge_tick, sc);
2070: return;
2071:
2072: fail_5:
2073: bus_dmamap_unload(sc->bge_dmatag, sc->bge_ring_map);
2074:
2075: fail_4:
2076: bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
2077:
2078: fail_3:
2079: bus_dmamem_unmap(sc->bge_dmatag, kva,
2080: sizeof(struct bge_ring_data));
2081:
2082: fail_2:
2083: bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2084:
2085: fail_1:
2086: bus_space_unmap(sc->bge_btag, sc->bge_bhandle, size);
2087: }
2088:
2089: void
2090: bge_reset(struct bge_softc *sc)
2091: {
2092: struct pci_attach_args *pa = &sc->bge_pa;
2093: pcireg_t cachesize, command, pcistate, new_pcistate;
2094: u_int32_t reset;
2095: int i, val = 0;
2096:
2097: /* Save some important PCI state. */
2098: cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
2099: command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
2100: pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
2101:
2102: pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2103: BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2104: BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW);
2105:
2106: /* Disable fastboot on controllers that support it. */
2107: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
2108: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2109: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787)
2110: CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);
2111:
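	/*
	 * The (65 << 1) field written alongside the core-clock reset bit
	 * appears to be the Miscellaneous Config timer prescaler (0x41 for
	 * a 66 MHz reference); the same value is restored after the reset
	 * below.
	 */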
2112: reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);
2113:
2114: if (sc->bge_flags & BGE_PCIE) {
2115: if (CSR_READ_4(sc, 0x7e2c) == 0x60) {
2116: /* PCI Express 1.0 system */
2117: CSR_WRITE_4(sc, 0x7e2c, 0x20);
2118: }
2119: if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2120: /*
2121: * Prevent PCI Express link training
2122: * during global reset.
2123: */
2124: CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
2125: reset |= (1<<29);
2126: }
2127: }
2128:
2129: /*
2130: * Set GPHY Power Down Override to leave GPHY
2131: * powered up in D0 uninitialized.
2132: */
2133: if (BGE_IS_5705_OR_BEYOND(sc))
2134: reset |= BGE_MISCCFG_KEEP_GPHY_POWER;
2135:
2136: /* Issue global reset */
2137: bge_writereg_ind(sc, BGE_MISC_CFG, reset);
2138:
2139: DELAY(1000);
2140:
2141: if (sc->bge_flags & BGE_PCIE) {
2142: if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2143: pcireg_t v;
2144:
2145: DELAY(500000); /* wait for link training to complete */
2146: v = pci_conf_read(pa->pa_pc, pa->pa_tag, 0xc4);
2147: pci_conf_write(pa->pa_pc, pa->pa_tag, 0xc4, v | (1<<15));
2148: }
2149:
2150: /*
2151: * Set PCI Express max payload size to 128 bytes
2152: * and clear error status.
2153: */
2154: pci_conf_write(pa->pa_pc, pa->pa_tag,
2155: BGE_PCI_CONF_DEV_CTRL, 0xf5000);
2156: }
2157:
2158: /* Reset some of the PCI state that got zapped by reset */
2159: pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2160: BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2161: BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW);
2162: pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
2163: pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
2164: bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2165:
2166: /* Enable memory arbiter. */
2167: if (BGE_IS_5714_FAMILY(sc)) {
2168: u_int32_t val;
2169:
2170: val = CSR_READ_4(sc, BGE_MARB_MODE);
2171: CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val);
2172: } else
2173: CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2174:
2175: /*
2176: * Prevent PXE restart: write a magic number to the
2177: * general communications memory at 0xB50.
2178: */
2179: bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2180:
2181: /*
2182: * Poll until we see 1's complement of the magic number.
2183: * This indicates that the firmware initialization
2184: * is complete. We expect this to fail if no SEEPROM
2185: * is fitted.
2186: */
2187: for (i = 0; i < BGE_TIMEOUT; i++) {
2188: val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2189: if (val == ~BGE_MAGIC_NUMBER)
2190: break;
2191: DELAY(10);
2192: }
2193:
2194: if (i >= BGE_TIMEOUT && (!(sc->bge_flags & BGE_NO_EEPROM)))
2195: printf("%s: firmware handshake timed out\n",
2196: sc->bge_dev.dv_xname);
2197:
2198: /*
2199: * XXX Wait for the value of the PCISTATE register to
2200: * return to its original pre-reset state. This is a
2201: * fairly good indicator of reset completion. If we don't
2202: * wait for the reset to fully complete, trying to read
2203: * from the device's non-PCI registers may yield garbage
2204: * results.
2205: */
2206: for (i = 0; i < BGE_TIMEOUT; i++) {
2207: new_pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag,
2208: BGE_PCI_PCISTATE);
2209: if ((new_pcistate & ~BGE_PCISTATE_RESERVED) ==
2210: (pcistate & ~BGE_PCISTATE_RESERVED))
2211: break;
2212: DELAY(10);
2213: }
2214: if ((new_pcistate & ~BGE_PCISTATE_RESERVED) !=
2215: (pcistate & ~BGE_PCISTATE_RESERVED)) {
2216: DPRINTFN(5, ("%s: pcistate failed to revert\n",
2217: sc->bge_dev.dv_xname));
2218: }
2219:
2220: /* Fix up byte swapping */
2221: CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
2222:
2223: CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2224:
2225: /*
2226: * The 5704 in TBI mode apparently needs some special
2227: 	 * adjustment to ensure the SERDES drive level is set
2228: * to 1.2V.
2229: */
2230: if (sc->bge_flags & BGE_TBI &&
2231: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
2232: u_int32_t serdescfg;
2233:
2234: serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
2235: serdescfg = (serdescfg & ~0xFFF) | 0x880;
2236: CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
2237: }
2238:
2239: if (sc->bge_flags & BGE_PCIE &&
2240: sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2241: u_int32_t v;
2242:
2243: /* Enable PCI Express bug fix */
2244: v = CSR_READ_4(sc, 0x7c00);
2245: CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
2246: }
2247: DELAY(10000);
2248: }
2249:
2250: /*
2251: * Frame reception handling. This is called if there's a frame
2252: * on the receive return list.
2253: *
2254: * Note: we have to be able to handle two possibilities here:
2255: * 1) the frame is from the Jumbo receive ring
2256: * 2) the frame is from the standard receive ring
2257: */
2258:
2259: void
2260: bge_rxeof(struct bge_softc *sc)
2261: {
2262: struct ifnet *ifp;
2263: int stdcnt = 0, jumbocnt = 0;
2264: bus_dmamap_t dmamap;
2265: bus_addr_t offset, toff;
2266: bus_size_t tlen;
2267: int tosync;
2268:
2269: /* Nothing to do */
2270: if (sc->bge_rx_saved_considx ==
2271: sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx)
2272: return;
2273:
2274: ifp = &sc->arpcom.ac_if;
2275:
2276: bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2277: offsetof(struct bge_ring_data, bge_status_block),
2278: sizeof (struct bge_status_block),
2279: BUS_DMASYNC_POSTREAD);
2280:
2281: offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
2282: tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx -
2283: sc->bge_rx_saved_considx;
2284:
2285: toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));
2286:
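	/*
	 * A negative tosync means the producer index has wrapped past the
	 * end of the return ring: sync the tail of the ring first, then
	 * the wrapped portion at the start.
	 */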
2287: if (tosync < 0) {
2288: tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) *
2289: sizeof (struct bge_rx_bd);
2290: bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2291: toff, tlen, BUS_DMASYNC_POSTREAD);
2292: tosync = -tosync;
2293: }
2294:
2295: bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2296: offset, tosync * sizeof (struct bge_rx_bd),
2297: BUS_DMASYNC_POSTREAD);
2298:
2299: while(sc->bge_rx_saved_considx !=
2300: sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
2301: struct bge_rx_bd *cur_rx;
2302: u_int32_t rxidx;
2303: struct mbuf *m = NULL;
2304: #ifdef BGE_CHECKSUM
2305: int sumflags = 0;
2306: #endif
2307:
2308: cur_rx = &sc->bge_rdata->
2309: bge_rx_return_ring[sc->bge_rx_saved_considx];
2310:
2311: rxidx = cur_rx->bge_idx;
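		/* BGE_INC() advances a ring index, wrapping at the ring size. */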
2312: BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2313:
2314: if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2315: BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2316: m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2317: sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2318: jumbocnt++;
2319: if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2320: ifp->if_ierrors++;
2321: bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2322: continue;
2323: }
2324: if (bge_newbuf_jumbo(sc, sc->bge_jumbo, NULL)
2325: == ENOBUFS) {
2326: struct mbuf *m0;
2327: m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
2328: cur_rx->bge_len - ETHER_CRC_LEN +
2329: ETHER_ALIGN, 0, ifp, NULL);
2330: bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2331: if (m0 == NULL) {
2332: ifp->if_ierrors++;
2333: continue;
2334: }
2335: m_adj(m0, ETHER_ALIGN);
2336: m = m0;
2337: }
2338: } else {
2339: BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2340: m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2341: sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2342: stdcnt++;
2343: dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
2344: sc->bge_cdata.bge_rx_std_map[rxidx] = 0;
2345: bus_dmamap_unload(sc->bge_dmatag, dmamap);
2346: if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2347: ifp->if_ierrors++;
2348: bge_newbuf_std(sc, sc->bge_std, m, dmamap);
2349: continue;
2350: }
2351: if (bge_newbuf_std(sc, sc->bge_std,
2352: NULL, dmamap) == ENOBUFS) {
2353: ifp->if_ierrors++;
2354: bge_newbuf_std(sc, sc->bge_std, m, dmamap);
2355: continue;
2356: }
2357: }
2358:
2359: ifp->if_ipackets++;
2360: #ifdef __STRICT_ALIGNMENT
2361: /*
2362: * The i386 allows unaligned accesses, but for other
2363: * platforms we must make sure the payload is aligned.
2364: */
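		/*
		 * ETHER_ALIGN is 2, so shifting the 14-byte Ethernet header
		 * forward by two bytes leaves the IP header 32-bit aligned.
		 */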
2365: if (sc->bge_flags & BGE_RX_ALIGNBUG) {
2366: bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2367: cur_rx->bge_len);
2368: m->m_data += ETHER_ALIGN;
2369: }
2370: #endif
2371: m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2372: m->m_pkthdr.rcvif = ifp;
2373:
2374: #if NBPFILTER > 0
2375: /*
2376: * Handle BPF listeners. Let the BPF user see the packet.
2377: */
2378: if (ifp->if_bpf)
2379: bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
2380: #endif
2381:
2382: #ifdef BGE_CHECKSUM
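		/*
		 * The hardware returns the one's-complement sum of the IP
		 * header; a valid header sums to 0xffff, so XOR with 0xffff
		 * is zero when the checksum is good.
		 */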
2383: if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2384: sumflags |= M_IPV4_CSUM_IN_OK;
2385: else
2386: sumflags |= M_IPV4_CSUM_IN_BAD;
2387:
2388: if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2389: m->m_pkthdr.csum_data =
2390: cur_rx->bge_tcp_udp_csum;
2391: m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2392: }
2393:
2394: m->m_pkthdr.csum_flags = sumflags;
2395: sumflags = 0;
2396: #endif
2397: ether_input_mbuf(ifp, m);
2398: }
2399:
2400: CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2401: if (stdcnt)
2402: CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2403: if (jumbocnt)
2404: CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2405: }
2406:
2407: void
2408: bge_txeof(struct bge_softc *sc)
2409: {
2410: struct bge_tx_bd *cur_tx = NULL;
2411: struct ifnet *ifp;
2412: struct txdmamap_pool_entry *dma;
2413: bus_addr_t offset, toff;
2414: bus_size_t tlen;
2415: int tosync;
2416: struct mbuf *m;
2417:
2418: /* Nothing to do */
2419: if (sc->bge_tx_saved_considx ==
2420: sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx)
2421: return;
2422:
2423: ifp = &sc->arpcom.ac_if;
2424:
2425: bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2426: offsetof(struct bge_ring_data, bge_status_block),
2427: sizeof (struct bge_status_block),
2428: BUS_DMASYNC_POSTREAD);
2429:
2430: offset = offsetof(struct bge_ring_data, bge_tx_ring);
2431: tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
2432: sc->bge_tx_saved_considx;
2433:
2434: toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
2435:
2436: if (tosync < 0) {
2437: tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
2438: sizeof (struct bge_tx_bd);
2439: bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2440: toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2441: tosync = -tosync;
2442: }
2443:
2444: bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2445: offset, tosync * sizeof (struct bge_tx_bd),
2446: BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2447:
2448: /*
2449: * Go through our tx ring and free mbufs for those
2450: * frames that have been sent.
2451: */
2452: while (sc->bge_tx_saved_considx !=
2453: sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2454: u_int32_t idx = 0;
2455:
2456: idx = sc->bge_tx_saved_considx;
2457: cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2458: if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2459: ifp->if_opackets++;
2460: m = sc->bge_cdata.bge_tx_chain[idx];
2461: if (m != NULL) {
2462: sc->bge_cdata.bge_tx_chain[idx] = NULL;
2463: dma = sc->txdma[idx];
2464: bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
2465: dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2466: bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
2467: SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
2468: sc->txdma[idx] = NULL;
2469:
2470: m_freem(m);
2471: }
2472: sc->bge_txcnt--;
2473: BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2474: ifp->if_timer = 0;
2475: }
2476:
2477: if (cur_tx != NULL)
2478: ifp->if_flags &= ~IFF_OACTIVE;
2479: }
2480:
2481: int
2482: bge_intr(void *xsc)
2483: {
2484: struct bge_softc *sc;
2485: struct ifnet *ifp;
2486: u_int32_t statusword;
2487:
2488: sc = xsc;
2489: ifp = &sc->arpcom.ac_if;
2490:
2491: 	/* It is possible for the interrupt to arrive before
2492: 	 * the status block has been updated.
2493: * Reading the PCI State register will confirm whether the
2494: * interrupt is ours and will flush the status block.
2495: */
2496:
2497: /* read status word from status block */
2498: statusword = sc->bge_rdata->bge_status_block.bge_status;
2499:
2500: if ((statusword & BGE_STATFLAG_UPDATED) ||
2501: (!(CSR_READ_4(sc, BGE_PCI_PCISTATE) & BGE_PCISTATE_INTR_NOT_ACTIVE))) {
2502:
2503: /* Ack interrupt and stop others from occurring. */
2504: CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2505:
2506: /* clear status word */
2507: sc->bge_rdata->bge_status_block.bge_status = 0;
2508:
2509: if ((BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 &&
2510: sc->bge_chipid != BGE_CHIPID_BCM5700_B2) ||
2511: statusword & BGE_STATFLAG_LINKSTATE_CHANGED ||
2512: sc->bge_link_evt)
2513: bge_link_upd(sc);
2514:
2515: if (ifp->if_flags & IFF_RUNNING) {
2516: /* Check RX return ring producer/consumer */
2517: bge_rxeof(sc);
2518:
2519: /* Check TX ring producer/consumer */
2520: bge_txeof(sc);
2521: }
2522:
2523: /* Re-enable interrupts. */
2524: CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2525:
2526: if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
2527: bge_start(ifp);
2528:
2529: return (1);
2530: } else
2531: return (0);
2532: }
2533:
2534: void
2535: bge_tick(void *xsc)
2536: {
2537: struct bge_softc *sc = xsc;
2538: struct mii_data *mii = &sc->bge_mii;
2539: int s;
2540:
2541: s = splnet();
2542:
2543: if (BGE_IS_5705_OR_BEYOND(sc))
2544: bge_stats_update_regs(sc);
2545: else
2546: bge_stats_update(sc);
2547:
2548: if (sc->bge_flags & BGE_TBI) {
2549: /*
2550: 		 * Since auto-polling can't be used in TBI mode, we poll the
2551: 		 * link status manually. Here we register a pending link
2552: 		 * event and trigger an interrupt.
2553: */
2554: sc->bge_link_evt++;
2555: BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET);
2556: } else
2557: mii_tick(mii);
2558:
2559: timeout_add(&sc->bge_timeout, hz);
2560:
2561: splx(s);
2562: }
2563:
2564: void
2565: bge_stats_update_regs(struct bge_softc *sc)
2566: {
2567: struct ifnet *ifp;
2568: struct bge_mac_stats_regs stats;
2569: u_int32_t *s;
2570: u_long cnt;
2571: int i;
2572:
2573: ifp = &sc->arpcom.ac_if;
2574:
2575: s = (u_int32_t *)&stats;
2576: for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2577: *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2578: s++;
2579: }
2580:
2581: cnt = stats.dot3StatsSingleCollisionFrames +
2582: stats.dot3StatsMultipleCollisionFrames +
2583: stats.dot3StatsExcessiveCollisions +
2584: stats.dot3StatsLateCollisions;
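	/*
	 * The chip's counters are cumulative, so only the delta since the
	 * previous read is charged; if the counter wrapped or was reset,
	 * fall back to the raw value.
	 */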
2585: ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
2586: cnt - sc->bge_tx_collisions : cnt;
2587: sc->bge_tx_collisions = cnt;
2588: }
2589:
2590: void
2591: bge_stats_update(struct bge_softc *sc)
2592: {
2593: struct ifnet *ifp = &sc->arpcom.ac_if;
2594: bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2595: u_long cnt;
2596:
2597: #define READ_STAT(sc, stats, stat) \
2598: CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
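	/*
	 * READ_STAT() fetches one 32-bit counter of the chip-maintained
	 * statistics block through the register-space memory window
	 * (BGE_MEMWIN_START + BGE_STATS_BLOCK); offsetof() locates the
	 * field within struct bge_stats.
	 */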
2599:
2600: cnt = READ_STAT(sc, stats,
2601: txstats.dot3StatsSingleCollisionFrames.bge_addr_lo);
2602: cnt += READ_STAT(sc, stats,
2603: txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo);
2604: cnt += READ_STAT(sc, stats,
2605: txstats.dot3StatsExcessiveCollisions.bge_addr_lo);
2606: cnt += READ_STAT(sc, stats,
2607: txstats.dot3StatsLateCollisions.bge_addr_lo);
2608: ifp->if_collisions += cnt >= sc->bge_tx_collisions ?
2609: cnt - sc->bge_tx_collisions : cnt;
2610: sc->bge_tx_collisions = cnt;
2611:
2612: cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
2613: ifp->if_ierrors += cnt >= sc->bge_rx_discards ?
2614: cnt - sc->bge_rx_discards : cnt;
2615: sc->bge_rx_discards = cnt;
2616:
2617: cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
2618: ifp->if_oerrors += cnt >= sc->bge_tx_discards ?
2619: cnt - sc->bge_tx_discards : cnt;
2620: sc->bge_tx_discards = cnt;
2621:
2622: #undef READ_STAT
2623: }
2624:
2625: /*
2626:  * Compact outbound packets to avoid a bug with DMA segments shorter than 8 bytes.
2627: */
2628: int
2629: bge_compact_dma_runt(struct mbuf *pkt)
2630: {
2631: struct mbuf *m, *prev, *n = NULL;
2632: int totlen, prevlen, newprevlen;
2633:
2634: prev = NULL;
2635: totlen = 0;
2636: prevlen = -1;
2637:
2638: 	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
2639: 		int mlen = m->m_len;
2640: 		int shortfall = 8 - mlen;
2641:
2642: totlen += mlen;
2643: if (mlen == 0)
2644: continue;
2645: if (mlen >= 8)
2646: continue;
2647:
2648: /* If we get here, mbuf data is too small for DMA engine.
2649: * Try to fix by shuffling data to prev or next in chain.
2650: * If that fails, do a compacting deep-copy of the whole chain.
2651: */
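		/*
		 * Illustration: a 4-byte mbuf in the middle of the chain is
		 * appended to its predecessor if that mbuf has trailing
		 * space; failing that, bytes are pulled up from the
		 * following mbuf or borrowed from the preceding one.
		 */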
2652:
2653: /* Internal frag. If fits in prev, copy it there. */
2654: if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
2655: bcopy(m->m_data,
2656: prev->m_data+prev->m_len,
2657: mlen);
2658: prev->m_len += mlen;
2659: m->m_len = 0;
2660: /* XXX stitch chain */
2661: prev->m_next = m_free(m);
2662: m = prev;
2663: continue;
2664: } else if (m->m_next != NULL &&
2665: M_TRAILINGSPACE(m) >= shortfall &&
2666: m->m_next->m_len >= (8 + shortfall)) {
2667: 			/* m is writable and m_next has enough data; pull data up into m. */
2668:
2669: bcopy(m->m_next->m_data,
2670: m->m_data+m->m_len,
2671: shortfall);
2672: m->m_len += shortfall;
2673: m->m_next->m_len -= shortfall;
2674: m->m_next->m_data += shortfall;
2675: } else if (m->m_next == NULL || 1) {
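			/*
			 * Note: the "|| 1" makes this the catch-all case,
			 * so any runt not fixed above ends up here, not
			 * only one at the very end of the chain.
			 */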
2676: /* Got a runt at the very end of the packet.
2677: 			 * Borrow data from the tail of the preceding mbuf and
2678: * update its length in-place. (The original data is still
2679: * valid, so we can do this even if prev is not writable.)
2680: */
2681:
2682: /* if we'd make prev a runt, just move all of its data. */
2683: #ifdef DEBUG
2684: KASSERT(prev != NULL /*, ("runt but null PREV")*/);
2685: KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
2686: #endif
2687: if ((prev->m_len - shortfall) < 8)
2688: shortfall = prev->m_len;
2689:
2690: newprevlen = prev->m_len - shortfall;
2691:
2692: MGET(n, M_NOWAIT, MT_DATA);
2693: if (n == NULL)
2694: return (ENOBUFS);
2695: KASSERT(m->m_len + shortfall < MLEN
2696: /*,
2697: ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
2698:
2699: /* first copy the data we're stealing from prev */
2700: bcopy(prev->m_data + newprevlen, n->m_data, shortfall);
2701:
2702: /* update prev->m_len accordingly */
2703: prev->m_len -= shortfall;
2704:
2705: /* copy data from runt m */
2706: bcopy(m->m_data, n->m_data + shortfall, m->m_len);
2707:
2708: /* n holds what we stole from prev, plus m */
2709: n->m_len = shortfall + m->m_len;
2710:
2711: /* stitch n into chain and free m */
2712: n->m_next = m->m_next;
2713: prev->m_next = n;
2714: /* KASSERT(m->m_next == NULL); */
2715: m->m_next = NULL;
2716: m_free(m);
2717: m = n; /* for continuing loop */
2718: }
2719: prevlen = m->m_len;
2720: }
2721: return (0);
2722: }
2723:
2724: /*
2725: * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2726: * pointers to descriptors.
2727: */
2728: int
2729: bge_encap(struct bge_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
2730: {
2731: struct bge_tx_bd *f = NULL;
2732: u_int32_t frag, cur;
2733: u_int16_t csum_flags = 0;
2734: struct txdmamap_pool_entry *dma;
2735: bus_dmamap_t dmamap;
2736: int i = 0;
2737: #if NVLAN > 0
2738: struct ifvlan *ifv = NULL;
2739:
2740: if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
2741: m_head->m_pkthdr.rcvif != NULL)
2742: ifv = m_head->m_pkthdr.rcvif->if_softc;
2743: #endif
2744:
2745: cur = frag = *txidx;
2746:
2747: #ifdef BGE_CHECKSUM
2748: if (m_head->m_pkthdr.csum_flags) {
2749: if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
2750: csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2751: if (m_head->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT |
2752: M_UDPV4_CSUM_OUT))
2753: csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2754: #ifdef fake
2755: if (m_head->m_flags & M_LASTFRAG)
2756: csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2757: else if (m_head->m_flags & M_FRAG)
2758: csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2759: #endif
2760: }
2761: #endif
2762: if (!(BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX))
2763: goto doit;
2764:
2765: /*
2766: * bcm5700 Revision B silicon cannot handle DMA descriptors with
2767: * less than eight bytes. If we encounter a teeny mbuf
2768: * at the end of a chain, we can pad. Otherwise, copy.
2769: */
2770: if (bge_compact_dma_runt(m_head) != 0)
2771: return (ENOBUFS);
2772:
2773: doit:
2774: dma = SLIST_FIRST(&sc->txdma_list);
2775: if (dma == NULL)
2776: return (ENOBUFS);
2777: dmamap = dma->dmamap;
2778:
2779: /*
2780: * Start packing the mbufs in this chain into
2781: * the fragment pointers. Stop when we run out
2782: * of fragments or hit the end of the mbuf chain.
2783: */
2784: if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
2785: BUS_DMA_NOWAIT))
2786: return (ENOBUFS);
2787:
2788: /*
2789: * Sanity check: avoid coming within 16 descriptors
2790: * of the end of the ring.
2791: */
2792: if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16))
2793: goto fail_unload;
2794:
2795: for (i = 0; i < dmamap->dm_nsegs; i++) {
2796: f = &sc->bge_rdata->bge_tx_ring[frag];
2797: if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
2798: break;
2799: BGE_HOSTADDR(f->bge_addr, dmamap->dm_segs[i].ds_addr);
2800: f->bge_len = dmamap->dm_segs[i].ds_len;
2801: f->bge_flags = csum_flags;
2802: #if NVLAN > 0
2803: if (ifv != NULL) {
2804: f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2805: f->bge_vlan_tag = ifv->ifv_tag;
2806: } else {
2807: f->bge_vlan_tag = 0;
2808: }
2809: #endif
2810: cur = frag;
2811: BGE_INC(frag, BGE_TX_RING_CNT);
2812: }
2813:
2814: if (i < dmamap->dm_nsegs)
2815: goto fail_unload;
2816:
2817: bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
2818: BUS_DMASYNC_PREWRITE);
2819:
2820: if (frag == sc->bge_tx_saved_considx)
2821: goto fail_unload;
2822:
2823: sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
2824: sc->bge_cdata.bge_tx_chain[cur] = m_head;
2825: SLIST_REMOVE_HEAD(&sc->txdma_list, link);
2826: sc->txdma[cur] = dma;
2827: sc->bge_txcnt += dmamap->dm_nsegs;
2828:
2829: *txidx = frag;
2830:
2831: return (0);
2832:
2833: fail_unload:
2834: bus_dmamap_unload(sc->bge_dmatag, dmamap);
2835:
2836: return (ENOBUFS);
2837: }
2838:
2839: /*
2840: * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2841: * to the mbuf data regions directly in the transmit descriptors.
2842: */
2843: void
2844: bge_start(struct ifnet *ifp)
2845: {
2846: struct bge_softc *sc;
2847: struct mbuf *m_head = NULL;
2848: u_int32_t prodidx;
2849: int pkts = 0;
2850:
2851: sc = ifp->if_softc;
2852:
2853: if (!sc->bge_link || IFQ_IS_EMPTY(&ifp->if_snd))
2854: return;
2855:
2856: prodidx = sc->bge_tx_prodidx;
2857:
2858: while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2859: IFQ_POLL(&ifp->if_snd, m_head);
2860: if (m_head == NULL)
2861: break;
2862:
2863: /*
2864: * Pack the data into the transmit ring. If we
2865: * don't have room, set the OACTIVE flag and wait
2866: * for the NIC to drain the ring.
2867: */
2868: if (bge_encap(sc, m_head, &prodidx)) {
2869: ifp->if_flags |= IFF_OACTIVE;
2870: break;
2871: }
2872:
2873: /* now we are committed to transmit the packet */
2874: IFQ_DEQUEUE(&ifp->if_snd, m_head);
2875: pkts++;
2876:
2877: #if NBPFILTER > 0
2878: /*
2879: * If there's a BPF listener, bounce a copy of this frame
2880: * to him.
2881: */
2882: if (ifp->if_bpf)
2883: bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
2884: #endif
2885: }
2886: if (pkts == 0)
2887: return;
2888:
2889: /* Transmit */
2890: CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
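	/*
	 * On 5700 B-step parts the producer mailbox is written a second
	 * time below, apparently as a chip-bug workaround.
	 */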
2891: if (BGE_CHIPREV(sc->bge_chipid) == BGE_CHIPREV_5700_BX)
2892: CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2893:
2894: sc->bge_tx_prodidx = prodidx;
2895:
2896: /*
2897: * Set a timeout in case the chip goes out to lunch.
2898: */
2899: ifp->if_timer = 5;
2900: }
2901:
2902: void
2903: bge_init(void *xsc)
2904: {
2905: struct bge_softc *sc = xsc;
2906: struct ifnet *ifp;
2907: u_int16_t *m;
2908: int s;
2909:
2910: s = splnet();
2911:
2912: ifp = &sc->arpcom.ac_if;
2913:
2914: /* Cancel pending I/O and flush buffers. */
2915: bge_stop(sc);
2916: bge_reset(sc);
2917: bge_chipinit(sc);
2918:
2919: /*
2920: * Init the various state machines, ring
2921: * control blocks and firmware.
2922: */
2923: if (bge_blockinit(sc)) {
2924: printf("%s: initialization failure\n", sc->bge_dev.dv_xname);
2925: splx(s);
2926: return;
2927: }
2928:
2929: ifp = &sc->arpcom.ac_if;
2930:
2931: /* Specify MRU. */
2932: if (BGE_IS_JUMBO_CAPABLE(sc))
2933: CSR_WRITE_4(sc, BGE_RX_MTU,
2934: BGE_JUMBO_FRAMELEN + ETHER_VLAN_ENCAP_LEN);
2935: else
2936: CSR_WRITE_4(sc, BGE_RX_MTU,
2937: ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
2938:
2939: /* Load our MAC address. */
2940: m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
2941: CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2942: CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
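	/*
	 * The address is written as big-endian 16-bit words: the first two
	 * bytes of the MAC address land in ADDR1_LO and the remaining four
	 * in ADDR1_HI.
	 */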
2943:
2944: /* Disable hardware decapsulation of vlan frames. */
2945: BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
2946:
2947: /* Program promiscuous mode and multicast filters. */
2948: bge_iff(sc);
2949:
2950: /* Init RX ring. */
2951: bge_init_rx_ring_std(sc);
2952:
2953: /*
2954: * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
2955: 	 * memory to ensure that the chip has in fact read the first
2956: * entry of the ring.
2957: */
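	/*
	 * The word at BGE_STD_RX_RINGS + 8 appears to hold the length/index
	 * field of the first standard RX descriptor as the chip sees it, so
	 * reading back MCLBYTES - ETHER_ALIGN confirms the descriptor was
	 * fetched.
	 */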
2958: if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
2959: u_int32_t v, i;
2960: for (i = 0; i < 10; i++) {
2961: DELAY(20);
2962: v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
2963: if (v == (MCLBYTES - ETHER_ALIGN))
2964: break;
2965: }
2966: if (i == 10)
2967: printf("%s: 5705 A0 chip failed to load RX ring\n",
2968: sc->bge_dev.dv_xname);
2969: }
2970:
2971: /* Init Jumbo RX ring. */
2972: if (BGE_IS_JUMBO_CAPABLE(sc))
2973: bge_init_rx_ring_jumbo(sc);
2974:
2975: /* Init our RX return ring index */
2976: sc->bge_rx_saved_considx = 0;
2977:
2978: /* Init TX ring. */
2979: bge_init_tx_ring(sc);
2980:
2981: /* Turn on transmitter */
2982: BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2983:
2984: /* Turn on receiver */
2985: BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2986:
2987: CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
2988:
2989: /* Tell firmware we're alive. */
2990: BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2991:
2992: /* Enable host interrupts. */
2993: BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2994: BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2995: CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2996:
2997: bge_ifmedia_upd(ifp);
2998:
2999: ifp->if_flags |= IFF_RUNNING;
3000: ifp->if_flags &= ~IFF_OACTIVE;
3001:
3002: splx(s);
3003:
3004: timeout_add(&sc->bge_timeout, hz);
3005: }
3006:
3007: /*
3008: * Set media options.
3009: */
3010: int
3011: bge_ifmedia_upd(struct ifnet *ifp)
3012: {
3013: struct bge_softc *sc = ifp->if_softc;
3014: struct mii_data *mii = &sc->bge_mii;
3015: struct ifmedia *ifm = &sc->bge_ifmedia;
3016:
3017: /* If this is a 1000baseX NIC, enable the TBI port. */
3018: if (sc->bge_flags & BGE_TBI) {
3019: if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3020: return (EINVAL);
3021: switch(IFM_SUBTYPE(ifm->ifm_media)) {
3022: case IFM_AUTO:
3023: /*
3024: * The BCM5704 ASIC appears to have a special
3025: * mechanism for programming the autoneg
3026: * advertisement registers in TBI mode.
3027: */
3028: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
3029: u_int32_t sgdig;
3030: CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0);
3031: sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG);
3032: sgdig |= BGE_SGDIGCFG_AUTO|
3033: BGE_SGDIGCFG_PAUSE_CAP|
3034: BGE_SGDIGCFG_ASYM_PAUSE;
3035: CSR_WRITE_4(sc, BGE_SGDIG_CFG,
3036: sgdig|BGE_SGDIGCFG_SEND);
3037: DELAY(5);
3038: CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig);
3039: }
3040: break;
3041: case IFM_1000_SX:
3042: if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3043: BGE_CLRBIT(sc, BGE_MAC_MODE,
3044: BGE_MACMODE_HALF_DUPLEX);
3045: } else {
3046: BGE_SETBIT(sc, BGE_MAC_MODE,
3047: BGE_MACMODE_HALF_DUPLEX);
3048: }
3049: break;
3050: default:
3051: return (EINVAL);
3052: }
3053: /* XXX 802.3x flow control for 1000BASE-SX */
3054: return (0);
3055: }
3056:
3057: sc->bge_link_evt++;
3058: if (mii->mii_instance) {
3059: struct mii_softc *miisc;
3060: LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3061: mii_phy_reset(miisc);
3062: }
3063: mii_mediachg(mii);
3064:
3065: return (0);
3066: }
3067:
3068: /*
3069: * Report current media status.
3070: */
3071: void
3072: bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3073: {
3074: struct bge_softc *sc = ifp->if_softc;
3075: struct mii_data *mii = &sc->bge_mii;
3076:
3077: if (sc->bge_flags & BGE_TBI) {
3078: ifmr->ifm_status = IFM_AVALID;
3079: ifmr->ifm_active = IFM_ETHER;
3080: if (CSR_READ_4(sc, BGE_MAC_STS) &
3081: BGE_MACSTAT_TBI_PCS_SYNCHED) {
3082: ifmr->ifm_status |= IFM_ACTIVE;
3083: } else {
3084: ifmr->ifm_active |= IFM_NONE;
3085: return;
3086: }
3087: ifmr->ifm_active |= IFM_1000_SX;
3088: if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3089: ifmr->ifm_active |= IFM_HDX;
3090: else
3091: ifmr->ifm_active |= IFM_FDX;
3092: return;
3093: }
3094:
3095: mii_pollstat(mii);
3096: ifmr->ifm_status = mii->mii_media_status;
3097: ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
3098: sc->bge_flowflags;
3099: }
3100:
3101: int
3102: bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3103: {
3104: struct bge_softc *sc = ifp->if_softc;
3105: struct ifreq *ifr = (struct ifreq *) data;
3106: struct ifaddr *ifa = (struct ifaddr *)data;
3107: int s, error = 0;
3108: struct mii_data *mii;
3109:
3110: s = splnet();
3111:
3112: if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
3113: splx(s);
3114: return (error);
3115: }
3116:
3117: switch(command) {
3118: case SIOCSIFADDR:
3119: ifp->if_flags |= IFF_UP;
3120: if (!(ifp->if_flags & IFF_RUNNING))
3121: bge_init(sc);
3122: #ifdef INET
3123: if (ifa->ifa_addr->sa_family == AF_INET)
3124: arp_ifinit(&sc->arpcom, ifa);
3125: #endif /* INET */
3126: break;
3127: case SIOCSIFMTU:
3128: if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
3129: error = EINVAL;
3130: else if (ifp->if_mtu != ifr->ifr_mtu)
3131: ifp->if_mtu = ifr->ifr_mtu;
3132: break;
3133: case SIOCSIFFLAGS:
3134: if (ifp->if_flags & IFF_UP) {
3135: if (ifp->if_flags & IFF_RUNNING)
3136: bge_iff(sc);
3137: else
3138: bge_init(sc);
3139: } else {
3140: if (ifp->if_flags & IFF_RUNNING)
3141: bge_stop(sc);
3142: }
3143: sc->bge_if_flags = ifp->if_flags;
3144: break;
3145: case SIOCADDMULTI:
3146: case SIOCDELMULTI:
3147: error = (command == SIOCADDMULTI)
3148: ? ether_addmulti(ifr, &sc->arpcom)
3149: : ether_delmulti(ifr, &sc->arpcom);
3150:
3151: if (error == ENETRESET) {
3152: if (ifp->if_flags & IFF_RUNNING)
3153: bge_iff(sc);
3154: error = 0;
3155: }
3156: break;
3157: case SIOCSIFMEDIA:
3158: /* XXX Flow control is not supported for 1000BASE-SX */
3159: if (sc->bge_flags & BGE_TBI) {
3160: ifr->ifr_media &= ~IFM_ETH_FMASK;
3161: sc->bge_flowflags = 0;
3162: }
3163:
3164: /* Flow control requires full-duplex mode. */
3165: if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3166: (ifr->ifr_media & IFM_FDX) == 0) {
3167: ifr->ifr_media &= ~IFM_ETH_FMASK;
3168: }
3169: if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3170: if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3171: /* We can do both TXPAUSE and RXPAUSE. */
3172: ifr->ifr_media |=
3173: IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3174: }
3175: sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3176: }
3177: /* FALLTHROUGH */
3178: case SIOCGIFMEDIA:
3179: if (sc->bge_flags & BGE_TBI) {
3180: error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
3181: command);
3182: } else {
3183: mii = &sc->bge_mii;
3184: error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
3185: command);
3186: }
3187: break;
3188: default:
3189: error = ENOTTY;
3190: break;
3191: }
3192:
3193: splx(s);
3194:
3195: return (error);
3196: }
3197:
3198: void
3199: bge_watchdog(struct ifnet *ifp)
3200: {
3201: struct bge_softc *sc;
3202:
3203: sc = ifp->if_softc;
3204:
3205: printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
3206:
3207: bge_init(sc);
3208:
3209: ifp->if_oerrors++;
3210: }
3211:
3212: void
3213: bge_stop_block(struct bge_softc *sc, bus_size_t reg, u_int32_t bit)
3214: {
3215: int i;
3216:
3217: BGE_CLRBIT(sc, reg, bit);
3218:
3219: for (i = 0; i < BGE_TIMEOUT; i++) {
3220: if ((CSR_READ_4(sc, reg) & bit) == 0)
3221: return;
3222: delay(100);
3223: }
3224:
3225: DPRINTFN(5, ("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
3226: sc->bge_dev.dv_xname, (u_long) reg, bit));
3227: }
3228:
3229: /*
3230: * Stop the adapter and free any mbufs allocated to the
3231: * RX and TX lists.
3232: */
3233: void
3234: bge_stop(struct bge_softc *sc)
3235: {
3236: struct ifnet *ifp = &sc->arpcom.ac_if;
3237: struct ifmedia_entry *ifm;
3238: struct mii_data *mii;
3239: int mtmp, itmp;
3240:
3241: timeout_del(&sc->bge_timeout);
3242:
3243: ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3244:
3245: /*
3246: * Disable all of the receiver blocks
3247: */
3248: bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3249: bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3250: bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3251: if (!(BGE_IS_5705_OR_BEYOND(sc)))
3252: bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3253: bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3254: bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3255: bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3256:
3257: /*
3258: * Disable all of the transmit blocks
3259: */
3260: bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3261: bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3262: bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3263: bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3264: bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3265: if (!(BGE_IS_5705_OR_BEYOND(sc)))
3266: bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3267: bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3268:
3269: /*
3270: * Shut down all of the memory managers and related
3271: * state machines.
3272: */
3273: bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3274: bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3275: if (!(BGE_IS_5705_OR_BEYOND(sc)))
3276: bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3277:
3278: CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3279: CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3280:
3281: if (!(BGE_IS_5705_OR_BEYOND(sc))) {
3282: bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3283: bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3284: }
3285:
3286: /* Disable host interrupts. */
3287: BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3288: CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3289:
3290: /*
3291: * Tell firmware we're shutting down.
3292: */
3293: BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3294:
3295: /* Free the RX lists. */
3296: bge_free_rx_ring_std(sc);
3297:
3298: /* Free jumbo RX list. */
3299: if (BGE_IS_JUMBO_CAPABLE(sc))
3300: bge_free_rx_ring_jumbo(sc);
3301:
3302: /* Free TX buffers. */
3303: bge_free_tx_ring(sc);
3304:
3305: /*
3306: * Isolate/power down the PHY, but leave the media selection
3307: * unchanged so that things will be put back to normal when
3308: * we bring the interface back up.
3309: */
3310: if (!(sc->bge_flags & BGE_TBI)) {
3311: mii = &sc->bge_mii;
3312: itmp = ifp->if_flags;
3313: ifp->if_flags |= IFF_UP;
3314: ifm = mii->mii_media.ifm_cur;
3315: mtmp = ifm->ifm_media;
3316: ifm->ifm_media = IFM_ETHER|IFM_NONE;
3317: mii_mediachg(mii);
3318: ifm->ifm_media = mtmp;
3319: ifp->if_flags = itmp;
3320: }
3321:
3322: sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3323:
3324: /*
3325: 	 * We can't just call bge_link_upd() because the chip is almost
3326: 	 * stopped, so the bge_link_upd -> bge_tick -> bge_stats_update
3327: 	 * sequence may lead to a hardware deadlock. Instead we just clear
3328: 	 * the MAC's link state (the PHY may still have link UP).
3329: */
3330: sc->bge_link = 0;
3331: }
3332:
3333: /*
3334: * Stop all chip I/O so that the kernel's probe routines don't
3335: * get confused by errant DMAs when rebooting.
3336: */
3337: void
3338: bge_shutdown(void *xsc)
3339: {
3340: struct bge_softc *sc = (struct bge_softc *)xsc;
3341:
3342: bge_stop(sc);
3343: bge_reset(sc);
3344: }
3345:
3346: void
3347: bge_link_upd(struct bge_softc *sc)
3348: {
3349: struct ifnet *ifp = &sc->arpcom.ac_if;
3350: struct mii_data *mii = &sc->bge_mii;
3351: u_int32_t link, status;
3352:
3353: /* Clear 'pending link event' flag */
3354: sc->bge_link_evt = 0;
3355:
3356: /*
3357: * Process link state changes.
3358: * Grrr. The link status word in the status block does
3359: * not work correctly on the BCM5700 rev AX and BX chips,
3360: * according to all available information. Hence, we have
3361: * to enable MII interrupts in order to properly obtain
3362: * async link changes. Unfortunately, this also means that
3363: * we have to read the MAC status register to detect link
3364: * changes, thereby adding an additional register access to
3365: * the interrupt handler.
3366: *
3367: * XXX: perhaps link state detection procedure used for
3368: * BGE_CHIPID_BCM5700_B2 can be used for other BCM5700 revisions.
3369: */
3370:
3371: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700 &&
3372: sc->bge_chipid != BGE_CHIPID_BCM5700_B2) {
3373: status = CSR_READ_4(sc, BGE_MAC_STS);
3374: if (status & BGE_MACSTAT_MI_INTERRUPT) {
3375: timeout_del(&sc->bge_timeout);
3376: bge_tick(sc);
3377:
3378: if (!sc->bge_link &&
3379: mii->mii_media_status & IFM_ACTIVE &&
3380: IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3381: sc->bge_link++;
3382: } else if (sc->bge_link &&
3383: (!(mii->mii_media_status & IFM_ACTIVE) ||
3384: IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) {
3385: sc->bge_link = 0;
3386: }
3387:
3388: /* Clear the interrupt */
3389: CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3390: BGE_EVTENB_MI_INTERRUPT);
3391: bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
3392: bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
3393: BRGPHY_INTRS);
3394: }
3395: return;
3396: }
3397:
3398: if (sc->bge_flags & BGE_TBI) {
3399: status = CSR_READ_4(sc, BGE_MAC_STS);
3400: if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) {
3401: if (!sc->bge_link) {
3402: sc->bge_link++;
3403: if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704)
3404: BGE_CLRBIT(sc, BGE_MAC_MODE,
3405: BGE_MACMODE_TBI_SEND_CFGS);
3406: CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3407: status = CSR_READ_4(sc, BGE_MAC_MODE);
3408: ifp->if_link_state =
3409: (status & BGE_MACMODE_HALF_DUPLEX) ?
3410: LINK_STATE_HALF_DUPLEX :
3411: LINK_STATE_FULL_DUPLEX;
3412: if_link_state_change(ifp);
3413: }
3414: } else if (sc->bge_link) {
3415: sc->bge_link = 0;
3416: ifp->if_link_state = LINK_STATE_DOWN;
3417: if_link_state_change(ifp);
3418: }
3419: 	/* Discard link events for MII/GMII cards if MI auto-polling is disabled */
3420: } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) {
3421: /*
3422: * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit
3423: 		 * in the status word always set. Work around this bug by
3424: 		 * reading the PHY link status directly.
3425: */
3426: link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 1 : 0;
3427:
3428: if (link != sc->bge_link ||
3429: BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5700) {
3430: timeout_del(&sc->bge_timeout);
3431: bge_tick(sc);
3432:
3433: if (!sc->bge_link &&
3434: mii->mii_media_status & IFM_ACTIVE &&
3435: IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
3436: sc->bge_link++;
3437: else if (sc->bge_link &&
3438: (!(mii->mii_media_status & IFM_ACTIVE) ||
3439: IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE))
3440: sc->bge_link = 0;
3441: }
3442: }
3443:
3444: /* Clear the attention */
3445: CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3446: BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3447: BGE_MACSTAT_LINK_CHANGED);
3448: }
3449:
3450: void
3451: bge_power(int why, void *xsc)
3452: {
3453: struct bge_softc *sc = (struct bge_softc *)xsc;
3454: struct ifnet *ifp;
3455:
3456: if (why == PWR_RESUME) {
3457: ifp = &sc->arpcom.ac_if;
3458: if (ifp->if_flags & IFF_UP) {
3459: bge_init(xsc);
3460: if (ifp->if_flags & IFF_RUNNING)
3461: bge_start(ifp);
3462: }
3463: }
3464: }