Annotation of sys/dev/pci/if_lmc.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: if_lmc.c,v 1.23 2006/05/13 19:10:02 brad Exp $ */
2: /* $NetBSD: if_lmc.c,v 1.1 1999/03/25 03:32:43 explorer Exp $ */
3:
4: /*-
5: * Copyright (c) 1997-1999 LAN Media Corporation (LMC)
6: * All rights reserved. www.lanmedia.com
7: *
8: * This code is written by Michael Graff <graff@vix.com> for LMC.
9: * The code is derived from permitted modifications to software created
10: * by Matt Thomas (matt@3am-software.com).
11: *
12: * Redistribution and use in source and binary forms, with or without
13: * modification, are permitted provided that the following conditions
14: * are met:
15: * 1. Redistributions of source code must retain the above copyright
16: * notice, this list of conditions and the following disclaimer.
17: * 2. Redistributions in binary form must reproduce the above
18: * copyright notice, this list of conditions and the following disclaimer
19: * in the documentation and/or other materials provided with the
20: * distribution.
21: * 3. All marketing or advertising materials mentioning features or
22: * use of this software must display the following acknowledgement:
23: * This product includes software developed by LAN Media Corporation
24: * and its contributors.
25: * 4. Neither the name of LAN Media Corporation nor the names of its
26: * contributors may be used to endorse or promote products derived
27: * from this software without specific prior written permission.
28: *
29: * THIS SOFTWARE IS PROVIDED BY LAN MEDIA CORPORATION AND CONTRIBUTORS
30: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
31: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
32: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
33: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
36: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
37: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
39: * THE POSSIBILITY OF SUCH DAMAGE.
40: */
41:
42: /*-
43: * Copyright (c) 1994-1997 Matt Thomas (matt@3am-software.com)
44: * All rights reserved.
45: *
46: * Redistribution and use in source and binary forms, with or without
47: * modification, are permitted provided that the following conditions
48: * are met:
49: * 1. Redistributions of source code must retain the above copyright
50: * notice, this list of conditions and the following disclaimer.
51: * 2. The name of the author may not be used to endorse or promote products
52: * derived from this software without specific prior written permission
53: *
54: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64: */
65:
66: /*
67: * LMC1200 (DS1) & LMC5245 (DS3) LED definitions
68: * led0 yellow = far-end link is in Red alarm condition
69: * led1 blue = received an Alarm Indication signal (upstream failure)
70: * led2 Green = power to adapter, Gate Array loaded & driver attached
71: * led3 red = Loss of Signal (LOS) or out of frame (OOF) conditions
72: * detected on T3 receive signal
73: *
74: * LMC1000 (SSI) & LMC5200 (HSSI) LED definitions
75: * led0 Green = power to adapter, Gate Array loaded & driver attached
76: * led1 Green = DSR and DTR and RTS and CTS are set (CA, TA for LMC5200)
77: * led2 Green = Cable detected (Green indicates non-loopback mode for LMC5200)
78: * led3 red = No timing is available from the cable or the on-board
79: * frequency generator. (ST not available for LMC5200)
80: */
81:
82: #include "bpfilter.h"
83:
84: #include <sys/param.h>
85: #include <sys/systm.h>
86: #include <sys/mbuf.h>
87: #include <sys/socket.h>
88: #include <sys/ioctl.h>
89: #include <sys/errno.h>
90: #include <sys/malloc.h>
91: #include <sys/kernel.h>
92: #include <sys/proc.h>
93: #include <sys/device.h>
94:
95: #include <dev/pci/pcidevs.h>
96:
97: #include <net/if.h>
98: #include <net/if_types.h>
99: #include <net/if_dl.h>
100: #include <net/netisr.h>
101:
102: #if NBPFILTER > 0
103: #include <net/bpf.h>
104: #endif
105:
106: #include <net/if_sppp.h>
107:
108: #include <machine/bus.h>
109:
110: #include <dev/pci/pcireg.h>
111: #include <dev/pci/pcivar.h>
112: #include <dev/ic/dc21040reg.h>
113:
114: #include <dev/pci/if_lmc_types.h>
115: #include <dev/pci/if_lmcioctl.h>
116: #include <dev/pci/if_lmcvar.h>
117:
118: /*
119: * This module supports
120: * the DEC 21140A pass 2.2 PCI Fast Ethernet Controller.
121: */
122: static ifnet_ret_t lmc_ifstart_one(struct ifnet *ifp);
123: static ifnet_ret_t lmc_ifstart(struct ifnet *ifp);
124: static struct mbuf *lmc_txput(lmc_softc_t * const sc, struct mbuf *m);
125: static void lmc_rx_intr(lmc_softc_t * const sc);
126:
127: static void lmc_watchdog(struct ifnet *ifp);
128: static void lmc_ifup(lmc_softc_t * const sc);
129: static void lmc_ifdown(lmc_softc_t * const sc);
130:
131: /*
132: * Code the read the SROM and MII bit streams (I2C)
133: */
134: static inline void
135: lmc_delay_300ns(lmc_softc_t * const sc)
136: {
137: int idx;
138: for (idx = (300 / 33) + 1; idx > 0; idx--)
139: (void)LMC_CSR_READ(sc, csr_busmode);
140: }
141:
/*
 * Write the local variable `csr' to the SROM/MII CSR and pause ~300ns
 * so the serial EEPROM sees a stable signal.  Relies on `sc' and `csr'
 * being in scope at the expansion site.
 */
142: #define EMIT \
143: do { \
144: LMC_CSR_WRITE(sc, csr_srom_mii, csr); \
145: lmc_delay_300ns(sc); \
146: } while (0)
147:
/*
 * Clock the serial EEPROM (SROM) back into its idle state by feeding it
 * enough zero bits to flush any partially-issued command.
 */
148: static inline void
149: lmc_srom_idle(lmc_softc_t * const sc)
150: {
151: unsigned bit, csr;
152:
/* Select the SROM interface, raise chip select, start the clock. */
153: csr = SROMSEL ; EMIT;
154: csr = SROMSEL | SROMRD; EMIT;
155: csr ^= SROMCS; EMIT;
156: csr ^= SROMCLKON; EMIT;
157:
158: /*
159: * Write 25 cycles of 0 which will force the SROM to be idle.
160: */
161: for (bit = 3 + SROM_BITWIDTH + 16; bit > 0; bit--) {
162: csr ^= SROMCLKOFF; EMIT; /* clock low; data not valid */
163: csr ^= SROMCLKON; EMIT; /* clock high; data valid */
164: }
/* Drop the clock and chip select, then deselect the SROM entirely. */
165: csr ^= SROMCLKOFF; EMIT;
166: csr ^= SROMCS; EMIT;
167: csr = 0; EMIT;
168: }
169:
170:
/*
 * Read the entire serial EEPROM into sc->lmc_rombuf by bit-banging the
 * SROM read protocol: for each word index, issue a read command
 * (3 command bits + SROM_BITWIDTH address bits), then clock in 16 data
 * bits.  Words are stored little-endian (low byte first) in the buffer.
 */
171: static void
172: lmc_srom_read(lmc_softc_t * const sc)
173: {
174: unsigned idx;
175: const unsigned bitwidth = SROM_BITWIDTH;
176: const unsigned cmdmask = (SROMCMD_RD << bitwidth);
177: const unsigned msb = 1 << (bitwidth + 3 - 1);
178: unsigned lastidx = (1 << bitwidth) - 1;
179:
180: lmc_srom_idle(sc);
181:
182: for (idx = 0; idx <= lastidx; idx++) {
183: unsigned lastbit, data, bits, bit, csr;
/* Select the SROM and assert chip select for this word. */
184: csr = SROMSEL ; EMIT;
185: csr = SROMSEL | SROMRD; EMIT;
186: csr ^= SROMCSON; EMIT;
187: csr ^= SROMCLKON; EMIT;
188:
/*
 * Shift out the read command plus address, MSB first.  The data line
 * is only toggled when the next bit differs from the last one sent.
 */
189: lastbit = 0;
190: for (bits = idx|cmdmask, bit = bitwidth + 3
191: ; bit > 0
192: ; bit--, bits <<= 1) {
193: const unsigned thisbit = bits & msb;
194: csr ^= SROMCLKOFF; EMIT; /* clock L data invalid */
195: if (thisbit != lastbit) {
196: csr ^= SROMDOUT; EMIT;/* clock L invert data */
197: } else {
198: EMIT;
199: }
200: csr ^= SROMCLKON; EMIT; /* clock H data valid */
201: lastbit = thisbit;
202: }
203: csr ^= SROMCLKOFF; EMIT;
204:
/* Clock in the 16 data bits of the selected word, MSB first. */
205: for (data = 0, bits = 0; bits < 16; bits++) {
206: data <<= 1;
207: csr ^= SROMCLKON; EMIT; /* clock H data valid */
208: data |= LMC_CSR_READ(sc, csr_srom_mii) & SROMDIN ? 1 : 0;
209: csr ^= SROMCLKOFF; EMIT; /* clock L data invalid */
210: }
211: sc->lmc_rombuf[idx*2] = data & 0xFF;
212: sc->lmc_rombuf[idx*2+1] = data >> 8;
213: csr = SROMSEL | SROMRD; EMIT;
214: csr = 0; EMIT;
215: }
216: lmc_srom_idle(sc);
217: }
218:
/* Same as EMIT: write `csr' to the SROM/MII CSR, then wait ~300ns. */
219: #define MII_EMIT do { LMC_CSR_WRITE(sc, csr_srom_mii, csr); lmc_delay_300ns(sc); } while (0)
220:
/*
 * Shift `bits' bits of `data' out to the MII management interface,
 * MSB first, toggling the data line only when consecutive bits differ.
 */
221: static inline void
222: lmc_mii_writebits(lmc_softc_t * const sc, unsigned data, unsigned bits)
223: {
224: unsigned msb = 1 << (bits - 1);
/* Preserve only the MII control bits of the current CSR value. */
225: unsigned csr = LMC_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK);
226: unsigned lastbit = (csr & MII_DOUT) ? msb : 0;
227:
228: csr |= MII_WR; MII_EMIT; /* clock low; assert write */
229:
230: for (; bits > 0; bits--, data <<= 1) {
231: const unsigned thisbit = data & msb;
232: if (thisbit != lastbit) {
233: csr ^= MII_DOUT; MII_EMIT; /* clock low; invert data */
234: }
235: csr ^= MII_CLKON; MII_EMIT; /* clock high; data valid */
236: lastbit = thisbit;
237: csr ^= MII_CLKOFF; MII_EMIT; /* clock low; data not valid */
238: }
239: }
240:
/*
 * Perform the MII turnaround cycles that separate the command/address
 * phase from the data phase: drive a 1-0 for writes, or release the
 * data line (switch to read) before sampling for reads.
 */
241: static void
242: lmc_mii_turnaround(lmc_softc_t * const sc, u_int32_t cmd)
243: {
244: u_int32_t csr;
245:
246: csr = LMC_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK);
247: if (cmd == MII_WRCMD) {
248: csr |= MII_DOUT; MII_EMIT; /* clock low; change data */
249: csr ^= MII_CLKON; MII_EMIT; /* clock high; data valid */
250: csr ^= MII_CLKOFF; MII_EMIT; /* clock low; data not valid */
251: csr ^= MII_DOUT; MII_EMIT; /* clock low; change data */
252: } else {
253: csr |= MII_RD; MII_EMIT; /* clock low; switch to read */
254: }
/* One final clock pulse completes the turnaround for either direction. */
255: csr ^= MII_CLKON; MII_EMIT; /* clock high; data valid */
256: csr ^= MII_CLKOFF; MII_EMIT; /* clock low; data not valid */
257: }
258:
/*
 * Clock in a 16-bit register value from the MII management interface,
 * MSB first, then release the read line.  Returns the value read.
 */
259: static u_int32_t
260: lmc_mii_readbits(lmc_softc_t * const sc)
261: {
262: u_int32_t data;
263: u_int32_t csr = LMC_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK);
264: int idx;
265:
266: for (idx = 0, data = 0; idx < 16; idx++) {
267: data <<= 1; /* this is NOOP on the first pass through */
268: csr ^= MII_CLKON; MII_EMIT; /* clock high; data valid */
269: if (LMC_CSR_READ(sc, csr_srom_mii) & MII_DIN)
270: data |= 1;
271: csr ^= MII_CLKOFF; MII_EMIT; /* clock low; data not valid */
272: }
273: csr ^= MII_RD; MII_EMIT; /* clock low; turn off read */
274:
275: return data;
276: }
277:
/*
 * Read MII register `regno' of PHY `devaddr': send the preamble, the
 * read opcode, the device and register addresses, do the turnaround,
 * then clock in the 16-bit result.
 */
278: u_int32_t
279: lmc_mii_readreg(lmc_softc_t * const sc, u_int32_t devaddr, u_int32_t regno)
280: {
281: u_int32_t csr = LMC_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK);
282: u_int32_t data;
283:
/* Start with read and clock lines deasserted. */
284: csr &= ~(MII_RD|MII_CLK); MII_EMIT;
285: lmc_mii_writebits(sc, MII_PREAMBLE, 32);
286: lmc_mii_writebits(sc, MII_RDCMD, 8);
287: lmc_mii_writebits(sc, devaddr, 5);
288: lmc_mii_writebits(sc, regno, 5);
289: lmc_mii_turnaround(sc, MII_RDCMD);
290:
291: data = lmc_mii_readbits(sc);
292: return (data);
293: }
294:
/*
 * Write `data' to MII register `regno' of PHY `devaddr': preamble,
 * write opcode, device and register addresses, turnaround, then the
 * 16 data bits.
 */
295: void
296: lmc_mii_writereg(lmc_softc_t * const sc, u_int32_t devaddr,
297: u_int32_t regno, u_int32_t data)
298: {
299: u_int32_t csr;
300:
301: csr = LMC_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK);
/* Start with read and clock lines deasserted. */
302: csr &= ~(MII_RD|MII_CLK); MII_EMIT;
303: lmc_mii_writebits(sc, MII_PREAMBLE, 32);
304: lmc_mii_writebits(sc, MII_WRCMD, 8);
305: lmc_mii_writebits(sc, devaddr, 5);
306: lmc_mii_writebits(sc, regno, 5);
307: lmc_mii_turnaround(sc, MII_WRCMD);
308: lmc_mii_writebits(sc, data, 16);
309: }
310:
311: int
312: lmc_read_macaddr(lmc_softc_t * const sc)
313: {
314: lmc_srom_read(sc);
315:
316: bcopy(sc->lmc_rombuf + 20, sc->lmc_enaddr, 6);
317:
318: return 0;
319: }
320:
321: /*
322: * Check to make certain there is a signal from the modem, and flicker
323: * lights as needed.
324: */
325: static void
326: lmc_watchdog(struct ifnet *ifp)
327: {
328: lmc_softc_t * const sc = LMC_IFP_TO_SOFTC(ifp);
329: int state;
330: u_int32_t ostatus;
331: u_int32_t link_status;
332: u_int32_t ticks;
333:
334: state = 0;
335:
336: /*
337: * Make sure the tx jabber and rx watchdog are off,
338: * and the transmit and receive processes are running.
339: */
340: LMC_CSR_WRITE (sc, csr_15, 0x00000011);
341: sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
342: LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
343:
344: /* Is the transmit clock still available? */
345: ticks = LMC_CSR_READ (sc, csr_gp_timer);
346: ticks = 0x0000ffff - (ticks & 0x0000ffff);
347: if (ticks == 0)
348: {
349: /* no clock found ? */
350: if (sc->tx_clockState != 0)
351: {
352: sc->tx_clockState = 0;
353: if (sc->lmc_cardtype == LMC_CARDTYPE_SSI)
354: lmc_led_on (sc, LMC_MII16_LED3); /* ON red */
355: }
356: else
357: if (sc->tx_clockState == 0)
358: {
359: sc->tx_clockState = 1;
360: if (sc->lmc_cardtype == LMC_CARDTYPE_SSI)
361: lmc_led_off (sc, LMC_MII16_LED3); /* OFF red */
362: }
363: }
364:
365: link_status = sc->lmc_media->get_link_status(sc);
366: ostatus = ((sc->lmc_flags & LMC_MODEMOK) == LMC_MODEMOK);
367:
368: /*
369: * hardware level link lost, but the interface is marked as up.
370: * Mark it as down.
371: */
372: if (link_status == LMC_LINK_DOWN && ostatus) {
373: printf(LMC_PRINTF_FMT ": physical link down\n",
374: LMC_PRINTF_ARGS);
375: sc->lmc_flags &= ~LMC_MODEMOK;
376: if (sc->lmc_cardtype == LMC_CARDTYPE_DS3 ||
377: sc->lmc_cardtype == LMC_CARDTYPE_T1)
378: lmc_led_on (sc, LMC_DS3_LED3 | LMC_DS3_LED2);
379: /* turn on red LED */
380: else {
381: lmc_led_off (sc, LMC_MII16_LED1);
382: lmc_led_on (sc, LMC_MII16_LED0);
383: if (sc->lmc_timing == LMC_CTL_CLOCK_SOURCE_EXT)
384: lmc_led_on (sc, LMC_MII16_LED3);
385: }
386:
387: }
388:
389: /*
390: * hardware link is up, but the interface is marked as down.
391: * Bring it back up again.
392: */
393: if (link_status != LMC_LINK_DOWN && !ostatus) {
394: printf(LMC_PRINTF_FMT ": physical link up\n",
395: LMC_PRINTF_ARGS);
396: if (sc->lmc_flags & LMC_IFUP)
397: lmc_ifup(sc);
398: sc->lmc_flags |= LMC_MODEMOK;
399: if (sc->lmc_cardtype == LMC_CARDTYPE_DS3 ||
400: sc->lmc_cardtype == LMC_CARDTYPE_T1)
401: {
402: sc->lmc_miireg16 |= LMC_DS3_LED3;
403: lmc_led_off (sc, LMC_DS3_LED3);
404: /* turn off red LED */
405: lmc_led_on (sc, LMC_DS3_LED2);
406: } else {
407: lmc_led_on (sc, LMC_MII16_LED0 | LMC_MII16_LED1
408: | LMC_MII16_LED2);
409: if (sc->lmc_timing != LMC_CTL_CLOCK_SOURCE_EXT)
410: lmc_led_off (sc, LMC_MII16_LED3);
411: }
412:
413: return;
414: }
415:
416: /* Call media specific watchdog functions */
417: sc->lmc_media->watchdog(sc);
418:
419: /*
420: * remember the timer value
421: */
422: ticks = LMC_CSR_READ(sc, csr_gp_timer);
423: LMC_CSR_WRITE(sc, csr_gp_timer, 0xffffffffUL);
424: sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);
425:
426: ifp->if_timer = 1;
427: }
428:
429: /*
430: * Mark the interface as "up" and enable TX/RX and TX/RX interrupts.
431: * This also does a full software reset.
432: */
/*
 * Mark the interface as "up": full chip and media reset, raise the
 * link state, light the appropriate LEDs, unmask interrupts, and
 * start the transmit and receive engines.  (Sequence is order-
 * critical hardware bring-up; see also lmc_ifdown.)
 */
433: static void
434: lmc_ifup(lmc_softc_t * const sc)
435: {
/* Hold off the watchdog while we reset. */
436: sc->lmc_if.if_timer = 0;
437:
438: lmc_dec_reset(sc);
439: lmc_reset(sc);
440:
441: sc->lmc_media->set_link_status(sc, LMC_LINK_UP);
442: sc->lmc_media->set_status(sc, NULL);
443:
444: sc->lmc_flags |= LMC_IFUP;
445:
446: /*
447: * for DS3 & DS1 adapters light the green light, led2
448: */
449: if (sc->lmc_cardtype == LMC_CARDTYPE_DS3 ||
450: sc->lmc_cardtype == LMC_CARDTYPE_T1)
451: lmc_led_on (sc, LMC_MII16_LED2);
452: else
453: lmc_led_on (sc, LMC_MII16_LED0 | LMC_MII16_LED2);
454:
455: /*
456: * select what interrupts we want to get
457: */
458: sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
459: | TULIP_STS_RXINTR
460: | TULIP_STS_RXNOBUF
461: | TULIP_STS_TXINTR
462: | TULIP_STS_ABNRMLINTR
463: | TULIP_STS_SYSERROR
464: | TULIP_STS_TXSTOPPED
465: | TULIP_STS_TXUNDERFLOW
466: | TULIP_STS_RXSTOPPED
467: );
468: LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);
469:
/* Start both DMA engines. */
470: sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
471: sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
472: LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);
473:
/* Re-arm the one-second watchdog. */
474: sc->lmc_if.if_timer = 1;
475: }
476:
477: /*
478: * Mark the interface as "down" and disable TX/RX and TX/RX interrupts.
479: * This is done by performing a full reset on the interface.
480: */
/*
 * Mark the interface as "down": stop the watchdog, drop the link
 * state, extinguish all LEDs, and perform a full reset (which also
 * disables TX/RX and their interrupts).
 */
481: static void
482: lmc_ifdown(lmc_softc_t * const sc)
483: {
484: sc->lmc_if.if_timer = 0;
485: sc->lmc_flags &= ~LMC_IFUP;
486:
487: sc->lmc_media->set_link_status(sc, LMC_LINK_DOWN);
488: lmc_led_off(sc, LMC_MII16_LED_ALL);
489:
490: lmc_dec_reset(sc);
491: lmc_reset(sc);
492: sc->lmc_media->set_status(sc, NULL);
493: }
494:
/*
 * Receive interrupt service: walk the RX descriptor ring from
 * ri_nextin, hand completed packets to sppp_input(), recycle their
 * DMA maps, and replenish the ring with fresh cluster mbufs.  The
 * statement order (descriptor syncs, ownership checks, map recycling)
 * is critical and must not be reordered.
 */
495: static void
496: lmc_rx_intr(lmc_softc_t * const sc)
497: {
498: lmc_ringinfo_t * const ri = &sc->lmc_rxinfo;
499: struct ifnet * const ifp = &sc->lmc_if;
500: u_int32_t status;
501: int fillok = 1;
502:
503: sc->lmc_rxtick++;
504:
505: for (;;) {
506: lmc_desc_t *eop = ri->ri_nextin;
507: int total_len = 0, last_offset = 0;
508: struct mbuf *ms = NULL, *me = NULL;
509: int accept = 0;
510: bus_dmamap_t map;
511: int error;
512:
/* Top up the receive queue first if it has fallen below target. */
513: if (fillok && sc->lmc_rxq.ifq_len < LMC_RXQ_TARGET)
514: goto queue_mbuf;
515:
516: /*
517: * If the TULIP has no descriptors, there can't be any receive
518: * descriptors to process.
519: */
520: if (eop == ri->ri_nextout)
521: break;
522:
523: /*
524: * 90% of the packets will fit in one descriptor. So we
525: * optimize for that case.
526: */
527: LMC_RXDESC_POSTSYNC(sc, eop, sizeof(*eop));
528: status = letoh32(((volatile lmc_desc_t *) eop)->d_status);
529: if ((status &
530: (TULIP_DSTS_OWNER|TULIP_DSTS_RxFIRSTDESC|TULIP_DSTS_RxLASTDESC)) ==
531: (TULIP_DSTS_RxFIRSTDESC|TULIP_DSTS_RxLASTDESC)) {
532: IF_DEQUEUE(&sc->lmc_rxq, ms);
533: me = ms;
534: } else {
535: /*
536: * If still owned by the TULIP, don't touch it.
537: */
538: if (status & TULIP_DSTS_OWNER)
539: break;
540:
541: /*
542: * It is possible (though improbable unless the
543: * BIG_PACKET support is enabled or MCLBYTES < 1518)
544: * for a received packet to cross more than one
545: * receive descriptor.
546: */
/* Scan forward to the last descriptor; total_len counts extras. */
547: while ((status & TULIP_DSTS_RxLASTDESC) == 0) {
548: if (++eop == ri->ri_last)
549: eop = ri->ri_first;
550: LMC_RXDESC_POSTSYNC(sc, eop, sizeof(*eop));
551: status = letoh32(((volatile lmc_desc_t *)
552: eop)->d_status);
553: if (eop == ri->ri_nextout ||
554: (status & TULIP_DSTS_OWNER)) {
555: return;
556: }
557: total_len++;
558: }
559:
560: /*
561: * Dequeue the first buffer for the start of the
562: * packet. Hopefully this will be the only one we
563: * need to dequeue. However, if the packet consumed
564: * multiple descriptors, then we need to dequeue
565: * those buffers and chain to the starting mbuf.
566: * All buffers but the last buffer have the same
567: * length so we can set that now. (we add to
568: * last_offset instead of multiplying since we
569: * normally won't go into the loop and thereby
570: * saving a ourselves from doing a multiplication
571: * by 0 in the normal case).
572: */
573: IF_DEQUEUE(&sc->lmc_rxq, ms);
574: for (me = ms; total_len > 0; total_len--) {
575: map = LMC_GETCTX(me, bus_dmamap_t);
576: LMC_RXMAP_POSTSYNC(sc, map);
577: bus_dmamap_unload(sc->lmc_dmatag, map);
578: sc->lmc_rxmaps[sc->lmc_rxmaps_free++] = map;
579: #if defined(DIAGNOSTIC)
580: LMC_SETCTX(me, NULL);
581: #endif
582: me->m_len = LMC_RX_BUFLEN;
583: last_offset += LMC_RX_BUFLEN;
584: IF_DEQUEUE(&sc->lmc_rxq, me->m_next);
585: me = me->m_next;
586: }
587: }
588:
589: /*
590: * Now get the size of received packet (minus the CRC).
591: */
592: total_len = ((status >> 16) & 0x7FFF);
593: if (sc->ictl.crc_length == 16)
594: total_len -= 2;
595: else
596: total_len -= 4;
597:
/* Accept the packet unless we are flushing or the chip flagged errors. */
598: if ((sc->lmc_flags & LMC_RXIGNORE) == 0
599: && ((status & LMC_DSTS_ERRSUM) == 0
600: #ifdef BIG_PACKET
601: || (total_len <= sc->lmc_if.if_mtu + PPP_HEADER_LEN
602: && (status & TULIP_DSTS_RxOVERFLOW) == 0)
603: #endif
604: )) {
605:
606: map = LMC_GETCTX(me, bus_dmamap_t);
607: bus_dmamap_sync(sc->lmc_dmatag, map, 0, me->m_len,
608: BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
609: bus_dmamap_unload(sc->lmc_dmatag, map);
610: sc->lmc_rxmaps[sc->lmc_rxmaps_free++] = map;
611: #if defined(DIAGNOSTIC)
612: LMC_SETCTX(me, NULL);
613: #endif
614:
615: me->m_len = total_len - last_offset;
616: #if NBPFILTER > 0
617: if (sc->lmc_bpf != NULL) {
618: if (me == ms)
619: LMC_BPF_TAP(sc, mtod(ms, caddr_t),
620: total_len, BPF_DIRECTION_IN);
621: else
622: LMC_BPF_MTAP(sc, ms, BPF_DIRECTION_IN);
623: }
624: #endif
625: sc->lmc_flags |= LMC_RXACT;
626: accept = 1;
627: } else {
/* Errored frame: count it and recycle the DMA map without delivery. */
628: ifp->if_ierrors++;
629: if (status & TULIP_DSTS_RxOVERFLOW) {
630: sc->lmc_dot3stats.dot3StatsInternalMacReceiveErrors++;
631: }
632: map = LMC_GETCTX(me, bus_dmamap_t);
633: bus_dmamap_unload(sc->lmc_dmatag, map);
634: sc->lmc_rxmaps[sc->lmc_rxmaps_free++] = map;
635: #if defined(DIAGNOSTIC)
636: LMC_SETCTX(me, NULL);
637: #endif
638: }
639:
640: ifp->if_ipackets++;
641: if (++eop == ri->ri_last)
642: eop = ri->ri_first;
643: ri->ri_nextin = eop;
644:
645: queue_mbuf:
646: /*
647: * Either we are priming the TULIP with mbufs (m == NULL)
648: * or we are about to accept an mbuf for the upper layers
649: * so we need to allocate an mbuf to replace it. If we
650: * can't replace it, send up it anyways. This may cause
651: * us to drop packets in the future but that's better than
652: * being caught in livelock.
653: *
654: * Note that if this packet crossed multiple descriptors
655: * we don't even try to reallocate all the mbufs here.
656: * Instead we rely on the test of the beginning of
657: * the loop to refill for the extra consumed mbufs.
658: */
659: if (accept || ms == NULL) {
660: struct mbuf *m0;
661: MGETHDR(m0, M_DONTWAIT, MT_DATA);
662: if (m0 != NULL) {
663: MCLGET(m0, M_DONTWAIT);
664: if ((m0->m_flags & M_EXT) == 0) {
665: m_freem(m0);
666: m0 = NULL;
667: }
668: }
669: if (accept) {
670: ms->m_pkthdr.len = total_len;
671: ms->m_pkthdr.rcvif = ifp;
672: sppp_input(ifp, ms);
673: }
674: ms = m0;
675: }
676: if (ms == NULL) {
677: /*
678: * Couldn't allocate a new buffer. Don't bother
679: * trying to replenish the receive queue.
680: */
681: fillok = 0;
682: sc->lmc_flags |= LMC_RXBUFSLOW;
683: continue;
684: }
685: /*
686: * Now give the buffer(s) to the TULIP and save in our
687: * receive queue.
688: */
689: do {
690: u_int32_t ctl;
691: lmc_desc_t * const nextout = ri->ri_nextout;
692:
693: if (sc->lmc_rxmaps_free > 0) {
694: map = sc->lmc_rxmaps[--sc->lmc_rxmaps_free];
695: } else {
696: m_freem(ms);
697: sc->lmc_flags |= LMC_RXBUFSLOW;
698: #if defined(LMC_DEBUG)
699: sc->lmc_dbg.dbg_rxlowbufs++;
700: #endif
701: break;
702: }
703: LMC_SETCTX(ms, map);
704: error = bus_dmamap_load(sc->lmc_dmatag, map,
705: mtod(ms, void *), LMC_RX_BUFLEN,
706: NULL, BUS_DMA_NOWAIT);
707: if (error) {
708: printf(LMC_PRINTF_FMT
709: ": unable to load rx map, "
710: "error = %d\n",
711: LMC_PRINTF_ARGS, error);
712: panic("lmc_rx_intr"); /* XXX */
713: }
714:
715: ctl = letoh32(nextout->d_ctl);
716: /* For some weird reason we lose TULIP_DFLAG_ENDRING */
717: if ((nextout+1) == ri->ri_last)
718: ctl = LMC_CTL(LMC_CTL_FLGS(ctl)|
719: TULIP_DFLAG_ENDRING, 0, 0);
720: nextout->d_addr1 = htole32(map->dm_segs[0].ds_addr);
721: if (map->dm_nsegs == 2) {
722: nextout->d_addr2 = htole32(map->dm_segs[1].ds_addr);
723: nextout->d_ctl =
724: htole32(LMC_CTL(LMC_CTL_FLGS(ctl),
725: map->dm_segs[0].ds_len,
726: map->dm_segs[1].ds_len));
727: } else {
728: nextout->d_addr2 = 0;
729: nextout->d_ctl =
730: htole32(LMC_CTL(LMC_CTL_FLGS(ctl),
731: map->dm_segs[0].ds_len, 0));
732: }
/* Sync the descriptor before handing ownership back to the chip. */
733: LMC_RXDESC_POSTSYNC(sc, nextout, sizeof(*nextout));
734: ri->ri_nextout->d_status = htole32(TULIP_DSTS_OWNER);
735: LMC_RXDESC_POSTSYNC(sc, nextout, sizeof(u_int32_t));
736: if (++ri->ri_nextout == ri->ri_last)
737: ri->ri_nextout = ri->ri_first;
738: me = ms->m_next;
739: ms->m_next = NULL;
740: IF_ENQUEUE(&sc->lmc_rxq, ms);
741: } while ((ms = me) != NULL);
742:
743: if (sc->lmc_rxq.ifq_len >= LMC_RXQ_TARGET)
744: sc->lmc_flags &= ~LMC_RXBUFSLOW;
745: }
746: }
747:
/*
 * Transmit-completion service: reclaim descriptors the chip has
 * finished with, free the transmitted mbufs (and recycle their DMA
 * maps), and update the transmit statistics.  Returns the number of
 * descriptors reclaimed.  NOTE: the `} else { printf... }' alternative
 * at the LMC_DEBUG conditional deliberately lives inside the #if.
 */
748: static int
749: lmc_tx_intr(lmc_softc_t * const sc)
750: {
751: lmc_ringinfo_t * const ri = &sc->lmc_txinfo;
752: struct mbuf *m;
753: int xmits = 0;
754: int descs = 0;
755: u_int32_t d_status;
756:
757: sc->lmc_txtick++;
758:
759: while (ri->ri_free < ri->ri_max) {
760: u_int32_t flag;
761:
/* Stop at the first descriptor the chip still owns. */
762: LMC_TXDESC_POSTSYNC(sc, ri->ri_nextin, sizeof(*ri->ri_nextin));
763: d_status = letoh32(((volatile lmc_desc_t *) ri->ri_nextin)->d_status);
764: if (d_status & TULIP_DSTS_OWNER)
765: break;
766:
/* Only the last segment of a packet carries the mbuf to free. */
767: flag = LMC_CTL_FLGS(letoh32(ri->ri_nextin->d_ctl));
768: if (flag & TULIP_DFLAG_TxLASTSEG) {
769: IF_DEQUEUE(&sc->lmc_txq, m);
770: if (m != NULL) {
771: bus_dmamap_t map = LMC_GETCTX(m, bus_dmamap_t);
772: LMC_TXMAP_POSTSYNC(sc, map);
773: sc->lmc_txmaps[sc->lmc_txmaps_free++] = map;
774: #if NBPFILTER > 0
775: if (sc->lmc_bpf != NULL)
776: LMC_BPF_MTAP(sc, m, BPF_DIRECTION_OUT);
777: #endif
778: m_freem(m);
779: #if defined(LMC_DEBUG)
780: } else {
781: printf(LMC_PRINTF_FMT ": tx_intr: failed to dequeue mbuf?!?\n", LMC_PRINTF_ARGS);
782: #endif
783: }
784: xmits++;
785: if (d_status & LMC_DSTS_ERRSUM) {
786: sc->lmc_if.if_oerrors++;
787: if (d_status & TULIP_DSTS_TxUNDERFLOW) {
788: sc->lmc_dot3stats.dot3StatsInternalTransmitUnderflows++;
789: }
790: } else {
791: if (d_status & TULIP_DSTS_TxDEFERRED) {
792: sc->lmc_dot3stats.dot3StatsDeferredTransmissions++;
793: }
794: }
795: }
796:
797: if (++ri->ri_nextin == ri->ri_last)
798: ri->ri_nextin = ri->ri_first;
799:
800: ri->ri_free++;
801: descs++;
802: sc->lmc_if.if_flags &= ~IFF_OACTIVE;
803: }
804: /*
805: * If nothing left to transmit, disable the timer.
806: * Else if progress, reset the timer back to 2 ticks.
807: */
808: sc->lmc_if.if_opackets += xmits;
809:
810: return descs;
811: }
812:
813: static void
814: lmc_print_abnormal_interrupt (lmc_softc_t * const sc, u_int32_t csr)
815: {
816: printf(LMC_PRINTF_FMT ": Abnormal interrupt\n", LMC_PRINTF_ARGS);
817: }
818:
/*
 * PCI system-error descriptions, indexed by the 3-bit field
 * (csr & TULIP_STS_ERRORMASK) >> TULIP_STS_ERR_SHIFT extracted in
 * lmc_intr_handler().
 */
819: static const char * const lmc_system_errors[] = {
820: "parity error",
821: "master abort",
822: "target abort",
823: "reserved #3",
824: "reserved #4",
825: "reserved #5",
826: "reserved #6",
827: "reserved #7",
828: };
829:
/*
 * Main interrupt dispatch: repeatedly read and acknowledge CSR5,
 * handling system errors, receive events (including the 21140A-A[CDE]
 * receive-overflow hang workaround), abnormal interrupts (with TX
 * threshold escalation on underflow), and transmit completions.
 * Sets *progress_p to 1 if any enabled interrupt bit was seen.
 */
830: static void
831: lmc_intr_handler(lmc_softc_t * const sc, int *progress_p)
832: {
833: u_int32_t csr;
834:
835: while ((csr = LMC_CSR_READ(sc, csr_status)) & sc->lmc_intrmask) {
836:
837: *progress_p = 1;
/* Write the bits back to CSR5 to acknowledge them. */
838: LMC_CSR_WRITE(sc, csr_status, csr);
839:
840: if (csr & TULIP_STS_SYSERROR) {
841: sc->lmc_last_system_error = (csr & TULIP_STS_ERRORMASK) >> TULIP_STS_ERR_SHIFT;
842: if (sc->lmc_flags & LMC_NOMESSAGES) {
843: sc->lmc_flags |= LMC_SYSTEMERROR;
844: } else {
845: printf(LMC_PRINTF_FMT ": system error: %s\n",
846: LMC_PRINTF_ARGS,
847: lmc_system_errors[sc->lmc_last_system_error]);
848: }
/* A system error requires a chip reset; stop processing now. */
849: sc->lmc_flags |= LMC_NEEDRESET;
850: sc->lmc_system_errors++;
851: break;
852: }
853: if (csr & (TULIP_STS_RXINTR | TULIP_STS_RXNOBUF)) {
854: u_int32_t misses = LMC_CSR_READ(sc, csr_missed_frames);
855: if (csr & TULIP_STS_RXNOBUF)
856: sc->lmc_dot3stats.dot3StatsMissedFrames += misses & 0xFFFF;
857: /*
858: * Pass 2.[012] of the 21140A-A[CDE] may hang and/or corrupt data
859: * on receive overflows.
860: */
861: if ((misses & 0x0FFE0000) && (sc->lmc_features & LMC_HAVE_RXBADOVRFLW)) {
862: sc->lmc_dot3stats.dot3StatsInternalMacReceiveErrors++;
863: /*
864: * Stop the receiver process and spin until it's stopped.
865: * Tell rx_intr to drop the packets it dequeues.
866: */
867: LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode & ~TULIP_CMD_RXRUN);
868: while ((LMC_CSR_READ(sc, csr_status) & TULIP_STS_RXSTOPPED) == 0)
869: ;
870: LMC_CSR_WRITE(sc, csr_status, TULIP_STS_RXSTOPPED);
871: sc->lmc_flags |= LMC_RXIGNORE;
872: }
873: lmc_rx_intr(sc);
874: if (sc->lmc_flags & LMC_RXIGNORE) {
875: /*
876: * Restart the receiver.
877: */
878: sc->lmc_flags &= ~LMC_RXIGNORE;
879: LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);
880: }
881: }
882: if (csr & TULIP_STS_ABNRMLINTR) {
883: u_int32_t tmp = csr & sc->lmc_intrmask
884: & ~(TULIP_STS_NORMALINTR|TULIP_STS_ABNRMLINTR);
/* On TX underflow, raise the TX threshold, or fall back to store&forward. */
885: if (csr & TULIP_STS_TXUNDERFLOW) {
886: if ((sc->lmc_cmdmode & TULIP_CMD_THRESHOLDCTL) != TULIP_CMD_THRSHLD160) {
887: sc->lmc_cmdmode += TULIP_CMD_THRSHLD96;
888: sc->lmc_flags |= LMC_NEWTXTHRESH;
889: } else if (sc->lmc_features & LMC_HAVE_STOREFWD) {
890: sc->lmc_cmdmode |= TULIP_CMD_STOREFWD;
891: sc->lmc_flags |= LMC_NEWTXTHRESH;
892: }
893: }
894: if (sc->lmc_flags & LMC_NOMESSAGES) {
895: sc->lmc_statusbits |= tmp;
896: } else {
897: lmc_print_abnormal_interrupt(sc, tmp);
898: sc->lmc_flags |= LMC_NOMESSAGES;
899: }
900: LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);
901: }
902:
903: if (csr & TULIP_STS_TXINTR)
904: lmc_tx_intr(sc);
905:
906: if (sc->lmc_flags & LMC_WANTTXSTART)
907: lmc_ifstart(&sc->lmc_if);
908: }
909: }
910:
/*
 * Interrupt entry point registered with the PCI layer.  Thin wrapper
 * around lmc_intr_handler(); when the platform's interrupt functions
 * return a value, `progress' reports whether this device interrupted.
 */
911: lmc_intrfunc_t
912: lmc_intr_normal(void *arg)
913: {
914: lmc_softc_t * sc = (lmc_softc_t *) arg;
915: int progress = 0;
916:
917: lmc_intr_handler(sc, &progress);
918:
919: #if !defined(LMC_VOID_INTRFUNC)
920: return progress;
921: #endif
922: }
923:
924: static struct mbuf *
925: lmc_mbuf_compress(struct mbuf *m)
926: {
927: struct mbuf *m0;
928: #if MCLBYTES >= LMC_MTU + PPP_HEADER_LEN && !defined(BIG_PACKET)
929: MGETHDR(m0, M_DONTWAIT, MT_DATA);
930: if (m0 != NULL) {
931: if (m->m_pkthdr.len > MHLEN) {
932: MCLGET(m0, M_DONTWAIT);
933: if ((m0->m_flags & M_EXT) == 0) {
934: m_freem(m);
935: m_freem(m0);
936: return NULL;
937: }
938: }
939: m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
940: m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;
941: }
942: #else
943: int mlen = MHLEN;
944: int len = m->m_pkthdr.len;
945: struct mbuf **mp = &m0;
946:
947: while (len > 0) {
948: if (mlen == MHLEN) {
949: MGETHDR(*mp, M_DONTWAIT, MT_DATA);
950: } else {
951: MGET(*mp, M_DONTWAIT, MT_DATA);
952: }
953: if (*mp == NULL) {
954: m_freem(m0);
955: m0 = NULL;
956: break;
957: }
958: if (len > MLEN) {
959: MCLGET(*mp, M_DONTWAIT);
960: if (((*mp)->m_flags & M_EXT) == 0) {
961: m_freem(m0);
962: m0 = NULL;
963: break;
964: }
965: (*mp)->m_len = (len <= MCLBYTES ? len : MCLBYTES);
966: } else {
967: (*mp)->m_len = (len <= mlen ? len : mlen);
968: }
969: m_copydata(m, m->m_pkthdr.len - len,
970: (*mp)->m_len, mtod((*mp), caddr_t));
971: len -= (*mp)->m_len;
972: mp = &(*mp)->m_next;
973: mlen = MLEN;
974: }
975: #endif
976: m_freem(m);
977: return m0;
978: }
979:
/*
 * queue the mbuf handed to us for the interface. If we cannot
 * queue it, return the mbuf. Return NULL if the mbuf was queued.
 *
 * On success the mbuf's ownership passes to the driver (it is put on
 * sc->lmc_txq together with its loaded DMA map and freed once the
 * transmit-done interrupt reclaims the descriptors).  On failure the
 * caller keeps ownership and LMC_WANTTXSTART may be set so the slow
 * ifstart path is re-installed (see the "finish:" label).
 */
static struct mbuf *
lmc_txput(lmc_softc_t * const sc, struct mbuf *m)
{
	lmc_ringinfo_t * const ri = &sc->lmc_txinfo;
	lmc_desc_t *eop, *nextout;	/* eop: last desc written for this pkt */
	int segcnt, free;
	u_int32_t d_status, ctl;
	bus_dmamap_t map;
	int error;

#if defined(LMC_DEBUG)
	/* Transmitter must be running or the poll below would be futile. */
	if ((sc->lmc_cmdmode & TULIP_CMD_TXRUN) == 0) {
		printf(LMC_PRINTF_FMT ": txput: tx not running\n",
		    LMC_PRINTF_ARGS);
		sc->lmc_flags |= LMC_WANTTXSTART;
		goto finish;
	}
#endif

	/*
	 * Now we try to fill in our transmit descriptors. This is
	 * a bit reminiscent of going on the Ark two by two
	 * since each descriptor for the TULIP can describe
	 * two buffers. So we advance through packet filling
	 * each of the two entries at a time to fill each
	 * descriptor. Clear the first and last segment bits
	 * in each descriptor (actually just clear everything
	 * but the end-of-ring or chain bits) to make sure
	 * we don't get messed up by previously sent packets.
	 *
	 * We may fail to put the entire packet on the ring if
	 * there is either not enough ring entries free or if the
	 * packet has more than MAX_TXSEG segments. In the former
	 * case we will just wait for the ring to empty. In the
	 * latter case we have to recopy.
	 */
	/*
	 * d_status starts as 0 so the FIRST descriptor of the packet is
	 * not handed to the chip yet; every later descriptor gets
	 * TULIP_DSTS_OWNER immediately.  The first one is flipped to
	 * OWNER only at the very end, which is what commits the whole
	 * packet atomically.
	 */
	d_status = 0;
	eop = nextout = ri->ri_nextout;
	segcnt = 0;
	free = ri->ri_free;
	/*
	 * Reclaim some DMA maps if we are out of them.
	 */
	if (sc->lmc_txmaps_free == 0) {
#if defined(LMC_DEBUG)
		sc->lmc_dbg.dbg_no_txmaps++;
#endif
		/* tx_intr frees maps of completed packets and ring slots */
		free += lmc_tx_intr(sc);
	}
	if (sc->lmc_txmaps_free > 0) {
		/* Borrow the top map; not committed until the decrement below. */
		map = sc->lmc_txmaps[sc->lmc_txmaps_free-1];
	} else {
		sc->lmc_flags |= LMC_WANTTXSTART;
#if defined(LMC_DEBUG)
		sc->lmc_dbg.dbg_txput_finishes[1]++;
#endif
		goto finish;
	}
	error = bus_dmamap_load_mbuf(sc->lmc_dmatag, map, m, BUS_DMA_NOWAIT);
	if (error != 0) {
		if (error == EFBIG) {
			/*
			 * The packet exceeds the number of transmit buffer
			 * entries that we can use for one packet, so we have
			 * to recopy it into one mbuf and then try again.
			 */
			m = lmc_mbuf_compress(m);
			if (m == NULL) {
#if defined(LMC_DEBUG)
				sc->lmc_dbg.dbg_txput_finishes[2]++;
#endif
				/* m is NULL: nothing to hand back to caller */
				goto finish;
			}
			error = bus_dmamap_load_mbuf(sc->lmc_dmatag, map, m,
			    BUS_DMA_NOWAIT);
		}
		if (error != 0) {
			printf(LMC_PRINTF_FMT ": unable to load tx map, "
			    "error = %d\n", LMC_PRINTF_ARGS, error);
#if defined(LMC_DEBUG)
			sc->lmc_dbg.dbg_txput_finishes[3]++;
#endif
			goto finish;
		}
	}
	/*
	 * Each descriptor carries two segments, so the packet needs
	 * (dm_nsegs + 1) / 2 ring entries.
	 */
	if ((free -= (map->dm_nsegs + 1) / 2) <= 0
	    /*
	     * See if there's any unclaimed space in the transmit ring.
	     */
	    && (free += lmc_tx_intr(sc)) <= 0) {
		/*
		 * There's no more room but since nothing
		 * has been committed at this point, just
		 * show output is active, put back the
		 * mbuf and return.
		 */
		sc->lmc_flags |= LMC_WANTTXSTART;
#if defined(LMC_DEBUG)
		sc->lmc_dbg.dbg_txput_finishes[4]++;
#endif
		bus_dmamap_unload(sc->lmc_dmatag, map);
		goto finish;
	}
	/* Fill full descriptors: two DMA segments per ring entry. */
	for (; map->dm_nsegs - segcnt > 1; segcnt += 2) {
		int flg;

		eop = nextout;
		/* Keep only the end-of-ring bit from the old contents. */
		flg = LMC_CTL_FLGS(letoh32(eop->d_ctl));
		flg &= TULIP_DFLAG_ENDRING;
		flg |= TULIP_DFLAG_TxNOPADDING;
		if (sc->ictl.crc_length == 16)
			flg |= TULIP_DFLAG_TxHASCRC;
		eop->d_status = htole32(d_status);
		eop->d_addr1 = htole32(map->dm_segs[segcnt].ds_addr);
		eop->d_addr2 = htole32(map->dm_segs[segcnt+1].ds_addr);
		eop->d_ctl = htole32(LMC_CTL(flg,
		    map->dm_segs[segcnt].ds_len,
		    map->dm_segs[segcnt+1].ds_len));
		/* All descriptors after the first are chip-owned right away. */
		d_status = TULIP_DSTS_OWNER;
		if (++nextout == ri->ri_last)
			nextout = ri->ri_first;
	}
	/* Odd segment count: one trailing descriptor with a single buffer. */
	if (segcnt < map->dm_nsegs) {
		int flg;

		eop = nextout;
		flg = LMC_CTL_FLGS(letoh32(eop->d_ctl));
		flg &= TULIP_DFLAG_ENDRING;
		flg |= TULIP_DFLAG_TxNOPADDING;
		if (sc->ictl.crc_length == 16)
			flg |= TULIP_DFLAG_TxHASCRC;
		eop->d_status = htole32(d_status);
		eop->d_addr1 = htole32(map->dm_segs[segcnt].ds_addr);
		eop->d_addr2 = 0;
		eop->d_ctl = htole32(LMC_CTL(flg,
		    map->dm_segs[segcnt].ds_len, 0));
		if (++nextout == ri->ri_last)
			nextout = ri->ri_first;
	}
	LMC_TXMAP_PRESYNC(sc, map);
	/* Stash the map in the mbuf so tx_intr can unload it later. */
	LMC_SETCTX(m, map);
	map = NULL;
	--sc->lmc_txmaps_free;		/* commit to using the dmamap */

	/*
	 * The descriptors have been filled in.  Now get ready
	 * to transmit.
	 */
	IF_ENQUEUE(&sc->lmc_txq, m);
	m = NULL;

	/*
	 * Make sure the next descriptor after this packet is owned
	 * by us since it may have been set up above if we ran out
	 * of room in the ring.
	 */
	nextout->d_status = 0;
	LMC_TXDESC_PRESYNC(sc, nextout, sizeof(u_int32_t));

	/*
	 * Mark the last and first segments, indicate we want a transmit
	 * complete interrupt, and tell it to transmit!
	 */
	ctl = letoh32(eop->d_ctl);
	eop->d_ctl = htole32(LMC_CTL(
	    LMC_CTL_FLGS(ctl)|TULIP_DFLAG_TxLASTSEG|TULIP_DFLAG_TxWANTINTR,
	    LMC_CTL_LEN1(ctl),
	    LMC_CTL_LEN2(ctl)));

	/*
	 * Note that ri->ri_nextout is still the start of the packet
	 * and until we set the OWNER bit, we can still back out of
	 * everything we have done.
	 */
	ctl = letoh32(ri->ri_nextout->d_ctl);
	ri->ri_nextout->d_ctl = htole32(LMC_CTL(
	    LMC_CTL_FLGS(ctl)|TULIP_DFLAG_TxFIRSTSEG,
	    LMC_CTL_LEN1(ctl),
	    LMC_CTL_LEN2(ctl)));
	/*
	 * Sync the descriptors to the device; the packet may wrap around
	 * the end of the ring, in which case two syncs are needed.
	 */
	if (eop < ri->ri_nextout) {
		LMC_TXDESC_PRESYNC(sc, ri->ri_nextout,
		    (caddr_t) ri->ri_last - (caddr_t) ri->ri_nextout);
		LMC_TXDESC_PRESYNC(sc, ri->ri_first,
		    (caddr_t) (eop + 1) - (caddr_t) ri->ri_first);
	} else {
		LMC_TXDESC_PRESYNC(sc, ri->ri_nextout,
		    (caddr_t) (eop + 1) - (caddr_t) ri->ri_nextout);
	}
	/*
	 * Flip the first descriptor to chip-owned LAST: this is the
	 * point of no return for the whole packet.
	 */
	ri->ri_nextout->d_status = htole32(TULIP_DSTS_OWNER);
	LMC_TXDESC_PRESYNC(sc, ri->ri_nextout, sizeof(u_int32_t));

	/* Kick the chip to look at the ring. */
	LMC_CSR_WRITE(sc, csr_txpoll, 1);

	/*
	 * This advances the ring for us.
	 */
	ri->ri_nextout = nextout;
	ri->ri_free = free;

	/*
	 * switch back to the single queueing ifstart.
	 */
	sc->lmc_flags &= ~LMC_WANTTXSTART;
	sc->lmc_if.if_start = lmc_ifstart_one;

	/*
	 * If we want a txstart, there must be not enough space in the
	 * transmit ring. So we want to enable transmit done interrupts
	 * so we can immediately reclaim some space. When the transmit
	 * interrupt is posted, the interrupt handler will call tx_intr
	 * to reclaim space and then txstart (since WANTTXSTART is set).
	 * txstart will move the packet into the transmit ring and clear
	 * WANTTXSTART thereby causing TXINTR to be cleared.
	 */
 finish:
	if (sc->lmc_flags & LMC_WANTTXSTART) {
		sc->lmc_if.if_flags |= IFF_OACTIVE;
		sc->lmc_if.if_start = lmc_ifstart;
	}

	return m;
}
1206:
1207:
1208: /*
1209: * This routine is entered at splnet()
1210: */
1211: static int
1212: lmc_ifioctl(struct ifnet * ifp, ioctl_cmd_t cmd, caddr_t data)
1213: {
1214: lmc_softc_t * const sc = LMC_IFP_TO_SOFTC(ifp);
1215: int s;
1216: struct proc *p = curproc;
1217: int error = 0;
1218: struct ifreq *ifr = (struct ifreq *)data;
1219: u_int32_t new_state;
1220: u_int32_t old_state;
1221: lmc_ctl_t ctl;
1222:
1223: s = LMC_RAISESPL();
1224:
1225: switch (cmd) {
1226: case LMCIOCGINFO:
1227: error = copyout(&sc->ictl, ifr->ifr_data, sizeof(lmc_ctl_t));
1228:
1229: goto out;
1230: break;
1231:
1232: case LMCIOCSINFO:
1233: error = suser(p, 0);
1234: if (error)
1235: goto out;
1236:
1237: error = copyin(ifr->ifr_data, &ctl, sizeof(lmc_ctl_t));
1238: if (error != 0)
1239: goto out;
1240:
1241: sc->lmc_media->set_status(sc, &ctl);
1242:
1243: goto out;
1244: break;
1245:
1246: case SIOCSIFMTU:
1247: /*
1248: * Don't allow the MTU to get larger than we can handle
1249: */
1250: if (ifr->ifr_mtu > LMC_MTU) {
1251: error = EINVAL;
1252: goto out;
1253: } else {
1254: ifp->if_mtu = ifr->ifr_mtu;
1255: }
1256: break;
1257: }
1258:
1259: /*
1260: * call the sppp ioctl layer
1261: */
1262: error = sppp_ioctl(ifp, cmd, data);
1263: if (error != 0)
1264: goto out;
1265:
1266: /*
1267: * If we are transitioning from up to down or down to up, call
1268: * our init routine.
1269: */
1270: new_state = ifp->if_flags & IFF_UP;
1271: old_state = sc->lmc_flags & LMC_IFUP;
1272:
1273: if (new_state && !old_state)
1274: lmc_ifup(sc);
1275: else if (!new_state && old_state)
1276: lmc_ifdown(sc);
1277:
1278: out:
1279: LMC_RESTORESPL(s);
1280:
1281: return error;
1282: }
1283:
1284: /*
1285: * These routines gets called at device spl (from sppp_output).
1286: */
1287:
1288: static ifnet_ret_t
1289: lmc_ifstart(struct ifnet * const ifp)
1290: {
1291: lmc_softc_t * const sc = LMC_IFP_TO_SOFTC(ifp);
1292: struct mbuf *m, *m0;
1293:
1294: if (sc->lmc_flags & LMC_IFUP) {
1295: while (sppp_isempty(ifp) == 0) {
1296: m = sppp_pick(ifp);
1297: if (m == NULL)
1298: break;
1299: if ((m = lmc_txput(sc, m)) != NULL)
1300: break;
1301: m0 = sppp_dequeue(ifp);
1302: #if defined(LMC_DEBUG)
1303: if (m0 != m)
1304: printf("lmc_ifstart: mbuf mismatch!\n");
1305: #endif
1306: }
1307: LMC_CSR_WRITE(sc, csr_txpoll, 1);
1308: }
1309: }
1310:
1311: static ifnet_ret_t
1312: lmc_ifstart_one(struct ifnet * const ifp)
1313: {
1314: lmc_softc_t * const sc = LMC_IFP_TO_SOFTC(ifp);
1315: struct mbuf *m, *m0;
1316:
1317: if ((sc->lmc_flags & LMC_IFUP) && (sppp_isempty(ifp) == 0)) {
1318: m = sppp_pick(ifp);
1319: if ((m = lmc_txput(sc, m)) != NULL)
1320: return;
1321: m0 = sppp_dequeue(ifp);
1322: #if defined(LMC_DEBUG)
1323: if (m0 != m)
1324: printf("lmc_ifstart: mbuf mismatch!\n");
1325: #endif
1326: LMC_CSR_WRITE(sc, csr_txpoll, 1);
1327: }
1328: }
1329:
1330: /*
1331: * Set up the OS interface magic and attach to the operating system
1332: * network services.
1333: */
1334: void
1335: lmc_attach(lmc_softc_t * const sc)
1336: {
1337: struct ifnet * const ifp = &sc->lmc_if;
1338:
1339: ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
1340: ifp->if_ioctl = lmc_ifioctl;
1341: ifp->if_start = lmc_ifstart;
1342: ifp->if_watchdog = lmc_watchdog;
1343: ifp->if_timer = 1;
1344: ifp->if_mtu = LMC_MTU;
1345: IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
1346: IFQ_SET_READY(&ifp->if_snd);
1347:
1348: if_attach(ifp);
1349: if_alloc_sadl(ifp);
1350:
1351: sppp_attach((struct ifnet *)&sc->lmc_sppp);
1352: sc->lmc_sppp.pp_flags = PP_CISCO | PP_KEEPALIVE;
1353: sc->lmc_sppp.pp_framebytes = 3;
1354:
1355: #if NBPFILTER > 0
1356: LMC_BPF_ATTACH(sc);
1357: #endif
1358:
1359: /*
1360: * turn off those LEDs...
1361: */
1362: sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
1363: /*
1364: * for DS3 & DS1 adapters light the green light, led2
1365: */
1366: if (sc->lmc_cardtype == LMC_CARDTYPE_DS3 ||
1367: sc->lmc_cardtype == LMC_CARDTYPE_T1)
1368: lmc_led_on (sc, LMC_MII16_LED2);
1369: else
1370: lmc_led_on (sc, LMC_MII16_LED0 | LMC_MII16_LED2);
1371: }
1372:
1373: void
1374: lmc_initring(lmc_softc_t * const sc, lmc_ringinfo_t * const ri,
1375: lmc_desc_t *descs, int ndescs)
1376: {
1377: ri->ri_max = ndescs;
1378: ri->ri_first = descs;
1379: ri->ri_last = ri->ri_first + ri->ri_max;
1380: bzero((caddr_t) ri->ri_first, sizeof(ri->ri_first[0]) * ri->ri_max);
1381: ri->ri_last[-1].d_ctl = htole32(LMC_CTL(TULIP_DFLAG_ENDRING, 0, 0));
1382: }
CVSweb