Annotation of sys/dev/pci/hifn7751.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: hifn7751.c,v 1.152 2006/06/29 21:34:51 deraadt Exp $ */
2:
3: /*
4: * Invertex AEON / Hifn 7751 driver
5: * Copyright (c) 1999 Invertex Inc. All rights reserved.
6: * Copyright (c) 1999 Theo de Raadt
7: * Copyright (c) 2000-2001 Network Security Technologies, Inc.
8: * http://www.netsec.net
9: * Copyright (c) 2003 Hifn Inc.
10:
11: * This driver is based on a previous driver by Invertex, for which they
12: * requested: Please send any comments, feedback, bug-fixes, or feature
13: * requests to software@invertex.com.
14: *
15: * Redistribution and use in source and binary forms, with or without
16: * modification, are permitted provided that the following conditions
17: * are met:
18: *
19: * 1. Redistributions of source code must retain the above copyright
20: * notice, this list of conditions and the following disclaimer.
21: * 2. Redistributions in binary form must reproduce the above copyright
22: * notice, this list of conditions and the following disclaimer in the
23: * documentation and/or other materials provided with the distribution.
24: * 3. The name of the author may not be used to endorse or promote products
25: * derived from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
32: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
34: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
36: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37: *
38: * Effort sponsored in part by the Defense Advanced Research Projects
39: * Agency (DARPA) and Air Force Research Laboratory, Air Force
40: * Materiel Command, USAF, under agreement number F30602-01-2-0537.
41: *
42: */
43:
44: /*
45: * Driver for various Hifn encryption processors.
46: */
47:
48: #include <sys/param.h>
49: #include <sys/systm.h>
50: #include <sys/proc.h>
51: #include <sys/errno.h>
52: #include <sys/malloc.h>
53: #include <sys/kernel.h>
54: #include <sys/mbuf.h>
55: #include <sys/device.h>
56:
57: #include <crypto/cryptodev.h>
58: #include <dev/rndvar.h>
59:
60: #include <dev/pci/pcireg.h>
61: #include <dev/pci/pcivar.h>
62: #include <dev/pci/pcidevs.h>
63:
64: #include <dev/pci/hifn7751reg.h>
65: #include <dev/pci/hifn7751var.h>
66:
#undef HIFN_DEBUG

/*
 * Prototypes and count for the pci_device structure
 */
int hifn_probe(struct device *, void *, void *);
void hifn_attach(struct device *, struct device *, void *);

/* Autoconf glue: per-instance softc size plus match/attach hooks. */
struct cfattach hifn_ca = {
	sizeof(struct hifn_softc), hifn_probe, hifn_attach,
};

struct cfdriver hifn_cd = {
	0, "hifn", DV_DULL
};

/* Board reset / bring-up and RAM probing. */
void	hifn_reset_board(struct hifn_softc *, int);
void	hifn_reset_puc(struct hifn_softc *);
void	hifn_puc_wait(struct hifn_softc *);
int	hifn_enable_crypto(struct hifn_softc *, pcireg_t);
void	hifn_set_retry(struct hifn_softc *);
void	hifn_init_dma(struct hifn_softc *);
void	hifn_init_pci_registers(struct hifn_softc *);
int	hifn_sramsize(struct hifn_softc *);
int	hifn_dramsize(struct hifn_softc *);
int	hifn_ramtype(struct hifn_softc *);
/* Crypto framework entry points and DMA ring management. */
void	hifn_sessions(struct hifn_softc *);
int	hifn_intr(void *);
u_int	hifn_write_command(struct hifn_command *, u_int8_t *);
u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
int	hifn_newsession(u_int32_t *, struct cryptoini *);
int	hifn_freesession(u_int64_t);
int	hifn_process(struct cryptop *);
void	hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
int	hifn_crypto(struct hifn_softc *, struct hifn_command *,
	    struct cryptop *);
int	hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
int	hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
int	hifn_dmamap_aligned(bus_dmamap_t);
int	hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
int	hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
int	hifn_init_pubrng(struct hifn_softc *);
void	hifn_rng(void *);
void	hifn_tick(void *);
void	hifn_abort(struct hifn_softc *);
void	hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
void	hifn_write_4(struct hifn_softc *, int, bus_size_t, u_int32_t);
u_int32_t hifn_read_4(struct hifn_softc *, int, bus_size_t);
/* Compression (LZS) support. */
int	hifn_compression(struct hifn_softc *, struct cryptop *,
	    struct hifn_command *);
struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
int	hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
void	hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
	    u_int8_t *);

/* Aggregate driver statistics, shared by all attached devices. */
struct hifn_stats hifnstats;

/* PCI vendor/product pairs this driver claims. */
const struct pci_matchid hifn_devices[] = {
	{ PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON },
	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751 },
	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811 },
	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951 },
	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955 },
	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956 },
	{ PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751 },
};
133:
134: int
135: hifn_probe(struct device *parent, void *match, void *aux)
136: {
137: return (pci_matchbyid((struct pci_attach_args *)aux, hifn_devices,
138: sizeof(hifn_devices)/sizeof(hifn_devices[0])));
139: }
140:
void
hifn_attach(struct device *parent, struct device *self, void *aux)
{
	struct hifn_softc *sc = (struct hifn_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	char rbase;
	bus_size_t iosize0, iosize1;
	u_int16_t ena;
	int rseg;
	caddr_t kva;
	int algs[CRYPTO_ALGORITHM_MAX + 1];

	sc->sc_pci_pc = pa->pa_pc;
	sc->sc_pci_tag = pa->pa_tag;

	/* Select per-chip feature flags from the PCI vendor/product id. */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_HIFN &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_HIFN_7951))
		sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_HIFN &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_HIFN_7955 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_HIFN_7956))
		sc->sc_flags = HIFN_IS_7956 | HIFN_HAS_AES | HIFN_HAS_RNG |
		    HIFN_HAS_PUBLIC;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_HIFN &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_HIFN_7811)
		sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS |
		    HIFN_NO_BURSTWRITE;

	/* Map both register BARs (group 0 and group 1). */
	if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0, 0)) {
		printf(": can't find mem space %d\n", 0);
		return;
	}

	if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1, 0)) {
		printf(": can't find mem space %d\n", 1);
		goto fail_io0;
	}

	hifn_set_retry(sc);

	/* 7811 can't take back-to-back writes; prime the write-merge state. */
	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
		sc->sc_waw_lastgroup = -1;
		sc->sc_waw_lastreg = 1;
	}

	/*
	 * Allocate one physically contiguous, page-aligned chunk holding
	 * all four descriptor rings plus bounce buffers (struct hifn_dma),
	 * and map it for DMA.
	 */
	sc->sc_dmat = pa->pa_dmat;
	if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
	    sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		printf(": can't create dma map\n");
		goto fail_io1;
	}
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
	    sc->sc_dmasegs, 1, &sc->sc_dmansegs, BUS_DMA_NOWAIT)) {
		printf(": can't alloc dma buffer\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		goto fail_io1;
	}
	if (bus_dmamem_map(sc->sc_dmat, sc->sc_dmasegs, sc->sc_dmansegs,
	    sizeof(*sc->sc_dma), &kva, BUS_DMA_NOWAIT)) {
		printf(": can't map dma buffers (%lu bytes)\n",
		    (u_long)sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, sc->sc_dmasegs, sc->sc_dmansegs);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
	    sizeof(*sc->sc_dma), NULL, BUS_DMA_NOWAIT)) {
		printf(": can't load dma map\n");
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, sc->sc_dmasegs, sc->sc_dmansegs);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		goto fail_io1;
	}
	sc->sc_dma = (struct hifn_dma *)kva;
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Soft reset, then unlock the crypto engine before touching it. */
	hifn_reset_board(sc, 0);

	if (hifn_enable_crypto(sc, pa->pa_id) != 0) {
		printf("%s: crypto enabling failed\n", sc->sc_dv.dv_xname);
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* Determine external RAM type/size (7955/7956 have fixed DRAM). */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
	    PCI_REVISION(pa->pa_class) == 0x61)
		sc->sc_ramsize >>= 1;

	/* Hook up the interrupt handler. */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_mem;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_mem;
	}

	hifn_sessions(sc);

	/* Pretty-print the RAM size in KB or MB. */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	printf("%d%cB %cram, %s\n", rseg, rbase,
	    sc->sc_drammodel ? 'd' : 's', intrstr);

	/* Register our algorithm set with the crypto framework. */
	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0)
		goto fail_intr;

	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	bzero(algs, sizeof(algs));

	algs[CRYPTO_LZS_COMP] = CRYPTO_ALG_FLAG_SUPPORTED;
	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		algs[CRYPTO_3DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
		algs[CRYPTO_ARC4] = CRYPTO_ALG_FLAG_SUPPORTED;
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		algs[CRYPTO_MD5] = CRYPTO_ALG_FLAG_SUPPORTED;
		algs[CRYPTO_SHA1] = CRYPTO_ALG_FLAG_SUPPORTED;
		algs[CRYPTO_MD5_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
		algs[CRYPTO_SHA1_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
		algs[CRYPTO_DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	}
	if (sc->sc_flags & HIFN_HAS_AES)
		algs[CRYPTO_AES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;

	crypto_register(sc->sc_cid, algs, hifn_newsession,
	    hifn_freesession, hifn_process);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
	    sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Start the RNG / public-key engines and the periodic tick. */
	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

	timeout_set(&sc->sc_tickto, hifn_tick, sc);
	timeout_add(&sc->sc_tickto, hz);

	return;

fail_intr:
	pci_intr_disestablish(pc, sc->sc_ih);
fail_mem:
	/* Unwind the DMA allocations in reverse order of setup. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
	bus_dmamem_free(sc->sc_dmat, sc->sc_dmasegs, sc->sc_dmansegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

fail_io1:
	bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1);
fail_io0:
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0);
}
338:
/*
 * Bring up the public-key and RNG engines, if this chip has them.
 * Returns 0 on success, 1 if the public-key engine failed to reset.
 */
int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to 100ms for the reset bit to self-clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			printf("%s: public key init failed\n",
			    sc->sc_dv.dv_xname);
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: disable, reconfigure, then re-enable. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/*
		 * Discard the first sample (sc_rngfirst) and poll the
		 * RNG roughly 100 times per second via hifn_rng().
		 */
		sc->sc_rngfirst = 1;
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		timeout_set(&sc->sc_rngto, hifn_rng, sc);
		timeout_add(&sc->sc_rngto, sc->sc_rnghz);
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	return (0);
}
399:
/*
 * Periodic timeout handler: harvest entropy from the hardware RNG and
 * feed it to the kernel random pool, then rearm the timeout.
 */
void
hifn_rng(void *vsc)
{
	struct hifn_softc *sc = vsc;
	u_int32_t num1, sts, num2;
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Drain up to 5 pairs of 32-bit words from the RNG FIFO. */
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				/*
				 * Underflow: returning without rearming the
				 * timeout disables harvesting permanently.
				 */
				printf("%s: RNG underflow: disabling\n",
				    sc->sc_dv.dv_xname);
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num1 = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num2 = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			/* The very first sample after enable is discarded. */
			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
			else {
				add_true_randomness(num1);
				add_true_randomness(num2);
			}
		}
	} else {
		num1 = READ_REG_1(sc, HIFN_1_RNG_DATA);

		if (sc->sc_rngfirst)
			sc->sc_rngfirst = 0;
		else
			add_true_randomness(num1);
	}

	timeout_add(&sc->sc_rngto, sc->sc_rnghz);
}
442:
443: void
444: hifn_puc_wait(struct hifn_softc *sc)
445: {
446: int i;
447:
448: for (i = 5000; i > 0; i--) {
449: DELAY(1);
450: if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
451: break;
452: }
453: if (!i)
454: printf("%s: proc unit did not reset\n", sc->sc_dv.dv_xname);
455: }
456:
457: /*
458: * Reset the processing unit.
459: */
void
hifn_reset_puc(struct hifn_softc *sc)
{
	/*
	 * Reset processing unit: write PUCTRL with only DMAENA set,
	 * then wait for the RESET bit to clear (see hifn_puc_wait()).
	 */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_puc_wait(sc);
}
467:
/*
 * Zero the low 16 bits of the PCI TRDY/retry timeout configuration
 * register (read-modify-write in PCI config space), disabling the
 * chip's retry/TRDY timeouts.
 */
void
hifn_set_retry(struct hifn_softc *sc)
{
	u_int32_t r;

	r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
	r &= 0xffff0000;
	pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
}
477:
478: /*
 * Resets the board.  Values in the registers are left as is
480: * from the reset (i.e. initial values are assigned elsewhere).
481: */
void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero. 0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		/* Full reset: drop both master and DMA reset bits. */
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		/* Partial reset: keep master reset, reset the PU only. */
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	/* Descriptor rings are stale after a reset; clear them. */
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);

	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* 7811: wait up to 1s for context RAM initialization. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}
531:
/*
 * Advance the unlock-sequence LFSR by "cnt" steps.  Each step computes
 * the parity of the bits selected by the tap mask 0x80080125 and shifts
 * it back into bit 0.  Used by hifn_enable_crypto() to derive the
 * values written to the SECRET2 unlock register.
 */
u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	u_int32_t v;
	u_int i;	/* unsigned, to match cnt (avoids signed/unsigned compare) */

	for (i = 0; i < cnt; i++) {

		/* get the parity */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		a = (v & 1) ^ (a << 1);
	}

	return (a);
}
553:
/*
 * Per-device unlock keys ("card ids") fed through hifn_next_signature()
 * by hifn_enable_crypto().  All currently-known boards use the all-zero
 * key.
 */
struct pci2id {
	u_short pci_vendor;
	u_short pci_prod;
	char card_id[13];
} pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
601:
602: /*
 * Checks to see if crypto is already enabled.  If crypto isn't enabled,
604: * "hifn_enable_crypto" is called to enable it. The check is important,
605: * as enabling crypto twice will lock the board.
606: */
607: int
608: hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid)
609: {
610: u_int32_t dmacfg, ramcfg, encl, addr, i;
611: char *offtbl = NULL;
612:
613: for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
614: if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
615: pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
616: offtbl = pci2id[i].card_id;
617: break;
618: }
619: }
620:
621: if (offtbl == NULL) {
622: #ifdef HIFN_DEBUG
623: printf(": Unknown card!\n");
624: #endif
625: return (1);
626: }
627:
628: ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
629: dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
630:
631: /*
632: * The RAM config register's encrypt level bit needs to be set before
633: * every read performed on the encryption level register.
634: */
635: WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
636:
637: encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
638:
639: /*
640: * Make sure we don't re-unlock. Two unlocks kills chip until the
641: * next reboot.
642: */
643: if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
644: #ifdef HIFN_DEBUG
645: printf(": Strong Crypto already enabled!\n");
646: #endif
647: goto report;
648: }
649:
650: if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
651: #ifdef HIFN_DEBUG
652: printf(": Unknown encryption level\n");
653: #endif
654: return 1;
655: }
656:
657: WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
658: HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
659: DELAY(1000);
660: addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1);
661: DELAY(1000);
662: WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0);
663: DELAY(1000);
664:
665: for (i = 0; i <= 12; i++) {
666: addr = hifn_next_signature(addr, offtbl[i] + 0x101);
667: WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr);
668:
669: DELAY(1000);
670: }
671:
672: WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
673: encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
674:
675: #ifdef HIFN_DEBUG
676: if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
677: printf(": engine is permanently locked until next system reset");
678: else
679: printf(": engine enabled successfully!");
680: #endif
681:
682: report:
683: WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
684: WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
685:
686: switch (encl) {
687: case HIFN_PUSTAT_ENA_0:
688: offtbl = "LZS";
689: break;
690: case HIFN_PUSTAT_ENA_1:
691: offtbl = "LZS DES";
692: break;
693: case HIFN_PUSTAT_ENA_2:
694: offtbl = "LZS 3DES ARC4 MD5 SHA1";
695: break;
696: default:
697: offtbl = "disabled";
698: break;
699: }
700: printf(": %s", offtbl);
701: if (sc->sc_flags & HIFN_HAS_RNG)
702: printf(" RNG");
703: if (sc->sc_flags & HIFN_HAS_AES)
704: printf(" AES");
705: if (sc->sc_flags & HIFN_HAS_PUBLIC)
706: printf(" PK");
707: printf(", ");
708:
709: return (0);
710: }
711:
712: /*
713: * Give initial values to the registers listed in the "Register Space"
714: * section of the HIFN Software Development reference manual.
715: */
void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/*
	 * Write status register: disable all four ring engines and
	 * acknowledge (write-1-to-clear) every pending status bit.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* Select the interrupt sources we care about. */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);

	/* RAM configuration: the 7955/7956 also needs its PLL set up. */
	if (sc->sc_flags & HIFN_IS_7956) {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	/* Enable DMA with the configured polling frequency/interval. */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
783:
784: /*
785: * The maximum number of sessions supported by the card
786: * is dependent on the amount of context ram, which
787: * encryption algorithms are enabled, and how compression
788: * is configured. This should be configured before this
789: * routine is called.
790: */
791: void
792: hifn_sessions(struct hifn_softc *sc)
793: {
794: u_int32_t pucnfg;
795: int ctxsize;
796:
797: pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
798:
799: if (pucnfg & HIFN_PUCNFG_COMPSING) {
800: if (pucnfg & HIFN_PUCNFG_ENCCNFG)
801: ctxsize = 128;
802: else
803: ctxsize = 512;
804: /*
805: * 7955/7956 has internal context memory of 32K
806: */
807: if (sc->sc_flags & HIFN_IS_7956)
808: sc->sc_maxses = 32768 / ctxsize;
809: else
810: sc->sc_maxses = 1 +
811: ((sc->sc_ramsize - 32768) / ctxsize);
812: }
813: else
814: sc->sc_maxses = sc->sc_ramsize / 16384;
815:
816: if (sc->sc_maxses > 2048)
817: sc->sc_maxses = 2048;
818: }
819:
820: /*
821: * Determine ram type (sram or dram). Board should be just out of a reset
822: * state when this is called.
823: */
824: int
825: hifn_ramtype(struct hifn_softc *sc)
826: {
827: u_int8_t data[8], dataexpect[8];
828: int i;
829:
830: for (i = 0; i < sizeof(data); i++)
831: data[i] = dataexpect[i] = 0x55;
832: if (hifn_writeramaddr(sc, 0, data))
833: return (-1);
834: if (hifn_readramaddr(sc, 0, data))
835: return (-1);
836: if (bcmp(data, dataexpect, sizeof(data)) != 0) {
837: sc->sc_drammodel = 1;
838: return (0);
839: }
840:
841: for (i = 0; i < sizeof(data); i++)
842: data[i] = dataexpect[i] = 0xaa;
843: if (hifn_writeramaddr(sc, 0, data))
844: return (-1);
845: if (hifn_readramaddr(sc, 0, data))
846: return (-1);
847: if (bcmp(data, dataexpect, sizeof(data)) != 0) {
848: sc->sc_drammodel = 1;
849: return (0);
850: }
851:
852: return (0);
853: }
854:
855: #define HIFN_SRAM_MAX (32 << 20)
856: #define HIFN_SRAM_STEP_SIZE 16384
857: #define HIFN_SRAM_GRANULARITY (HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
858:
/*
 * Size the external SRAM: stamp every 16K step with its own index
 * (highest address first), then read them back in ascending order.
 * The first step whose stamp does not read back correctly marks the
 * point where the address space wraps, i.e. the end of real SRAM.
 * sc_ramsize ends up holding the highest verified boundary.
 */
int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	/* Background fill pattern for the bytes not overwritten below. */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		/* First 4 bytes = step index (host byte order). */
		bcopy(&i, data, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, dataexpect, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (bcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}
888:
889: /*
890: * XXX For dram boards, one should really try all of the
891: * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
892: * is already set up correctly.
893: */
894: int
895: hifn_dramsize(struct hifn_softc *sc)
896: {
897: u_int32_t cnfg;
898:
899: if (sc->sc_flags & HIFN_IS_7956) {
900: /*
901: * 7956/7956 have a fixed internal ram of only 32K.
902: */
903: sc->sc_ramsize = 32768;
904: } else {
905: cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
906: HIFN_PUCNFG_DRAMMASK;
907: sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
908: }
909: return (0);
910: }
911:
/*
 * Reserve one descriptor slot in each of the four DMA rings (command,
 * source, destination, result) and return the indices through the out
 * parameters.  When a producer index reaches the end of a ring, the
 * extra trailing slot is written as a JUMP descriptor so the chip
 * wraps back to the start, and the index resets to 0.
 */
void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp,
    int *dstp, int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}
958:
959: int
960: hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
961: {
962: struct hifn_dma *dma = sc->sc_dma;
963: struct hifn_base_command wc;
964: const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
965: int r, cmdi, resi, srci, dsti;
966:
967: wc.masks = htole16(3 << 13);
968: wc.session_num = htole16(addr >> 14);
969: wc.total_source_count = htole16(8);
970: wc.total_dest_count = htole16(addr & 0x3fff);
971:
972: hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
973:
974: WRITE_REG_1(sc, HIFN_1_DMA_CSR,
975: HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
976: HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
977:
978: /* build write command */
979: bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
980: *(struct hifn_base_command *)dma->command_bufs[cmdi] = wc;
981: bcopy(data, &dma->test_src, sizeof(dma->test_src));
982:
983: dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
984: + offsetof(struct hifn_dma, test_src));
985: dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
986: + offsetof(struct hifn_dma, test_dst));
987:
988: dma->cmdr[cmdi].l = htole32(16 | masks);
989: dma->srcr[srci].l = htole32(8 | masks);
990: dma->dstr[dsti].l = htole32(4 | masks);
991: dma->resr[resi].l = htole32(4 | masks);
992:
993: bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
994: 0, sc->sc_dmamap->dm_mapsize,
995: BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
996:
997: for (r = 10000; r >= 0; r--) {
998: DELAY(10);
999: bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1000: 0, sc->sc_dmamap->dm_mapsize,
1001: BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1002: if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1003: break;
1004: bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1005: 0, sc->sc_dmamap->dm_mapsize,
1006: BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1007: }
1008: if (r == 0) {
1009: printf("%s: writeramaddr -- "
1010: "result[%d](addr %d) still valid\n",
1011: sc->sc_dv.dv_xname, resi, addr);
1012: r = -1;
1013: return (-1);
1014: } else
1015: r = 0;
1016:
1017: WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1018: HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1019: HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1020:
1021: return (r);
1022: }
1023:
1024: int
1025: hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1026: {
1027: struct hifn_dma *dma = sc->sc_dma;
1028: struct hifn_base_command rc;
1029: const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1030: int r, cmdi, srci, dsti, resi;
1031:
1032: rc.masks = htole16(2 << 13);
1033: rc.session_num = htole16(addr >> 14);
1034: rc.total_source_count = htole16(addr & 0x3fff);
1035: rc.total_dest_count = htole16(8);
1036:
1037: hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1038:
1039: WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1040: HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1041: HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1042:
1043: bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1044: *(struct hifn_base_command *)dma->command_bufs[cmdi] = rc;
1045:
1046: dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1047: offsetof(struct hifn_dma, test_src));
1048: dma->test_src = 0;
1049: dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1050: offsetof(struct hifn_dma, test_dst));
1051: dma->test_dst = 0;
1052: dma->cmdr[cmdi].l = htole32(8 | masks);
1053: dma->srcr[srci].l = htole32(8 | masks);
1054: dma->dstr[dsti].l = htole32(8 | masks);
1055: dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1056:
1057: bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1058: 0, sc->sc_dmamap->dm_mapsize,
1059: BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1060:
1061: for (r = 10000; r >= 0; r--) {
1062: DELAY(10);
1063: bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1064: 0, sc->sc_dmamap->dm_mapsize,
1065: BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1066: if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1067: break;
1068: bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1069: 0, sc->sc_dmamap->dm_mapsize,
1070: BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1071: }
1072: if (r == 0) {
1073: printf("%s: readramaddr -- "
1074: "result[%d](addr %d) still valid\n",
1075: sc->sc_dv.dv_xname, resi, addr);
1076: r = -1;
1077: } else {
1078: r = 0;
1079: bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1080: }
1081:
1082: WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1083: HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1084: HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1085:
1086: return (r);
1087: }
1088:
1089: /*
1090: * Initialize the descriptor rings.
1091: */
void
hifn_init_dma(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	int i;

	/*
	 * Set up the four descriptor rings in the shared DMA area:
	 * point every command/result descriptor at its fixed buffer,
	 * aim the extra descriptor past the end of each ring back at
	 * the ring's start, and zero all ring counters/indices.
	 */
	hifn_set_retry(sc);

	/* initialize static pointer values */
	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
		dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, command_bufs[i][0]));
	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
		dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, result_bufs[i][0]));

	/*
	 * The descriptor at index RSIZE of each ring permanently points
	 * back at descriptor 0; producers set HIFN_D_JUMP on it when
	 * they wrap, so the chip follows the ring around.
	 */
	dma->cmdr[HIFN_D_CMD_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	dma->srcr[HIFN_D_SRC_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	dma->dstr[HIFN_D_DST_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	dma->resr[HIFN_D_RES_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	/* u = in-use counts, i = producer indices, k = cleanup indices. */
	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
}
1125:
1126: /*
1127: * Writes out the raw command buffer space. Returns the
1128: * command buffer size.
1129: */
u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	struct hifn_base_command *base_cmd;
	struct hifn_mac_command *mac_cmd;
	struct hifn_crypt_command *cry_cmd;
	struct hifn_comp_command *comp_cmd;
	int using_mac, using_crypt, using_comp, len, ivlen;
	u_int32_t dlen, slen;

	/*
	 * Serialize 'cmd' into the raw command buffer 'buf' in the order
	 * the chip expects: base command, then the optional comp/mac/crypt
	 * sub-commands, then key and IV material.  Returns the number of
	 * bytes written.
	 */
	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
	using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP;

	base_cmd = (struct hifn_base_command *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_map->dm_mapsize;
	/* With slop, the trailing partial word is redirected into the
	 * slop area, so the chip sees it rounded up to a full word. */
	if (cmd->sloplen)
		dlen = cmd->dst_map->dm_mapsize - cmd->sloplen +
		    sizeof(u_int32_t);
	else
		dlen = cmd->dst_map->dm_mapsize;
	/* The low 16 bits of each length go in the count fields; the
	 * overflow bits are packed into the session_num field. */
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(struct hifn_base_command);

	/* Each sub-command repeats the same pattern: 16-bit length with
	 * overflow bits folded into the masks, plus a header skip. */
	if (using_comp) {
		comp_cmd = (struct hifn_comp_command *)buf_pos;
		dlen = cmd->compcrd->crd_len;
		comp_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		comp_cmd->masks = htole16(cmd->comp_masks |
		    ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M));
		comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip);
		comp_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_comp_command);
	}

	if (using_mac) {
		mac_cmd = (struct hifn_mac_command *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_mac_command);
	}

	if (using_crypt) {
		cry_cmd = (struct hifn_crypt_command *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_crypt_command);
	}

	/* Key material follows the sub-commands when a new key is loaded. */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/* Replicate the caller's key until 256 bytes are
			 * filled, then terminate with four zero bytes. */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				bcopy(cmd->ck, buf_pos, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			bzero(buf_pos, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES key are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			bcopy(cmd->ck, buf_pos, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* IV last; AES uses a longer IV than the DES-family ciphers. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
		    HIFN_CRYPT_CMD_ALG_AES)
			ivlen = HIFN_AES_IV_LENGTH;
		else
			ivlen = HIFN_IV_LENGTH;
		bcopy(cmd->iv, buf_pos, ivlen);
		buf_pos += ivlen;
	}

	/* No engine selected at all: emit 8 bytes of zero padding. */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
	    HIFN_BASE_CMD_COMP)) == 0) {
		bzero(buf_pos, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}
1256:
1257: int
1258: hifn_dmamap_aligned(bus_dmamap_t map)
1259: {
1260: int i;
1261:
1262: for (i = 0; i < map->dm_nsegs; i++) {
1263: if (map->dm_segs[i].ds_addr & 3)
1264: return (0);
1265: if ((i != (map->dm_nsegs - 1)) &&
1266: (map->dm_segs[i].ds_len & 3))
1267: return (0);
1268: }
1269: return (1);
1270: }
1271:
int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	u_int32_t p, l;
	int idx, used = 0, i;

	/*
	 * Fill the destination ring from cmd->dst_map.  Only the final
	 * descriptor carries HIFN_D_LAST; when the transfer length is not
	 * a multiple of 4 (cmd->sloplen != 0), the trailing partial word
	 * is redirected into the shared slop[] area instead of the
	 * caller's buffer.  Updates dsti/dstu and returns the next free
	 * destination ring index.
	 */
	idx = dma->dsti;
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		/* End of the ring: arm the jump descriptor and wrap. */
		if (++idx == HIFN_D_DST_RSIZE) {
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		/* Aligned: the last segment is written in one piece. */
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		/* Final descriptor delivers one word into the slop slot
		 * matching this command's result index. */
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* The whole-word part of the last segment (if any) still
		 * goes straight to the caller's buffer. */
		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	/* Emit the final (LAST-flagged) descriptor computed above. */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}
1343:
int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->src_map;
	int idx, i;
	u_int32_t last = 0;

	/*
	 * Fill the source ring from cmd->src_map, one descriptor per DMA
	 * segment; only the final segment carries HIFN_D_LAST.  Updates
	 * srci/srcu and returns the next free source ring index.
	 */
	idx = dma->srci;
	for (i = 0; i < map->dm_nsegs; i++) {
		if (i == map->dm_nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		/* End of the ring: arm the jump descriptor and wrap. */
		if (++idx == HIFN_D_SRC_RSIZE) {
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += map->dm_nsegs;
	return (idx);
}
1375:
int
hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd,
    struct cryptop *crp)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int cmdi, resi, s, err = 0;

	/*
	 * Queue 'cmd' on the DMA rings: map the source buffer, arrange a
	 * destination (in place when aligned, otherwise a fresh mbuf
	 * chain), then emit command, source, result and destination
	 * descriptors, enabling each DMA engine the first time it is
	 * needed.  Completion is delivered through cmd->cmd_callback from
	 * the interrupt handler.  Returns 0 on success or ENOMEM/EINVAL,
	 * unwinding all mappings on failure via the goto chain below.
	 */
	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map))
		return (ENOMEM);

	/* Load the source: mbuf chain or uio (contiguous unsupported). */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(cmd->src_map)) {
		/* Aligned source: process in place; remember the ragged
		 * tail length (0-3 bytes) for slop handling. */
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			/* No way to realign an iovec-based request. */
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			/* Unaligned mbuf source: build a new, aligned
			 * destination chain of the same total length. */
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			totlen = cmd->src_map->dm_mapsize;
			if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				err = ENOMEM;
				goto err_srcmap;
			}
			if (len == MHLEN)
				M_DUP_PKTHDR(m0, cmd->srcu.src_m);
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if (m0->m_flags & M_EXT)
					len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			/* Keep appending mbufs (clusters when large)
			 * until the chain covers the full length. */
			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					err = ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}

				m->m_len = len;
				if (m0->m_flags & M_PKTHDR)
					m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dstu.dst_m = m0;
		}
	}

	/* Create/load a separate destination map only when dst != src. */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat,
		    HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER,
		    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
	    sc->sc_dv.dv_xname,
	    READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER),
	    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
	    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
#endif

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	/* Ring bookkeeping below must not race the interrupt handler. */
	s = splnet();

	/*
	 * need 1 cmd, and 1 res
	 * need N src, and N dst
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}
	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}

	/* Wrap past the jump descriptor at the end of the command ring. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * Always enable the command wait interrupt.  We are obviously
	 * missing an interrupt or two somewhere. Enabling the command wait
	 * interrupt will guarantee we get called periodically until all
	 * of the queues are drained and thus work around this.
	 */
	sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	/* Remember which command owns this result slot for the ISR. */
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	/* Slop slots are indexed by the result descriptor's index. */
	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	printf("%s: command: stat %8x ier %8x\n",
	    sc->sc_dv.dv_xname,
	    READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER));
#endif

	/* Restart hifn_tick's idle countdown. */
	sc->sc_active = 5;
	cmd->cmd_callback = hifn_callback;
	splx(s);
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF &&
	    cmd->srcu.src_m != cmd->dstu.dst_m)
		m_freem(cmd->dstu.dst_m);
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	return (err);
}
1635:
1636: void
1637: hifn_tick(void *vsc)
1638: {
1639: struct hifn_softc *sc = vsc;
1640: int s;
1641:
1642: s = splnet();
1643: if (sc->sc_active == 0) {
1644: struct hifn_dma *dma = sc->sc_dma;
1645: u_int32_t r = 0;
1646:
1647: if (dma->cmdu == 0 && sc->sc_c_busy) {
1648: sc->sc_c_busy = 0;
1649: r |= HIFN_DMACSR_C_CTRL_DIS;
1650: CLR_LED(sc, HIFN_MIPSRST_LED0);
1651: }
1652: if (dma->srcu == 0 && sc->sc_s_busy) {
1653: sc->sc_s_busy = 0;
1654: r |= HIFN_DMACSR_S_CTRL_DIS;
1655: CLR_LED(sc, HIFN_MIPSRST_LED1);
1656: }
1657: if (dma->dstu == 0 && sc->sc_d_busy) {
1658: sc->sc_d_busy = 0;
1659: r |= HIFN_DMACSR_D_CTRL_DIS;
1660: }
1661: if (dma->resu == 0 && sc->sc_r_busy) {
1662: sc->sc_r_busy = 0;
1663: r |= HIFN_DMACSR_R_CTRL_DIS;
1664: CLR_LED(sc, HIFN_MIPSRST_LED2);
1665: }
1666: if (r)
1667: WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
1668: }
1669: else
1670: sc->sc_active--;
1671: splx(s);
1672: timeout_add(&sc->sc_tickto, hz);
1673: }
1674:
int
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t dmacsr, restart;
	int i, u;

	/*
	 * Interrupt handler: write back the DMA status bits we own, reset
	 * the chip on engine aborts, run completion callbacks for finished
	 * result descriptors, and reclaim consumed src/cmd ring slots.
	 * Returns 1 if the interrupt was ours, 0 otherwise.
	 */
	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

#ifdef HIFN_DEBUG
	printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
	    sc->sc_dv.dv_xname,
	    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
	    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return (0);

	/* Write the asserted, enabled status bits back to the CSR. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if (dmacsr & HIFN_DMACSR_ENGINE)
		WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR));

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	/* Over/under-runs are logged but do not force a reset here. */
	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
	if (restart)
		printf("%s: overrun %x\n", sc->sc_dv.dv_xname, dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			printf("%s: illegal read\n", sc->sc_dv.dv_xname);
		if (dmacsr & HIFN_DMACSR_ILLW)
			printf("%s: illegal write\n", sc->sc_dv.dv_xname);
	}

	/* Any engine abort requires a full reset of the device. */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		printf("%s: abort, resetting.\n", sc->sc_dv.dv_xname);
		hifnstats.hst_abort++;
		hifn_abort(sc);
		return (1);
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = dma->resk;
	while (dma->resu != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* Descriptor still VALID: the chip hasn't finished it. */
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/* Index RSIZE is the jump descriptor, not a real slot. */
		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];

			(*cmd->cmd_callback)(sc, cmd, dma->result_bufs[i]);
			hifnstats.hst_opackets++;
		}

		/* Wrapping over the jump slot consumes no completion. */
		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
		else
			dma->resu--;
	}
	dma->resk = i;

	/* Reclaim source descriptors the chip has finished reading. */
	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_SRC_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->srck = i; dma->srcu = u;

	/* Reclaim command descriptors the chip has consumed. */
	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	return (1);
}
1800:
1801: /*
1802: * Allocate a new 'session' and return an encoded session id. 'sidp'
1803: * contains our registration id, and should contain an encoded session
1804: * id on successful allocation.
1805: */
1806: int
1807: hifn_newsession(u_int32_t *sidp, struct cryptoini *cri)
1808: {
1809: struct cryptoini *c;
1810: struct hifn_softc *sc = NULL;
1811: int i, mac = 0, cry = 0, comp = 0, sesn;
1812: struct hifn_session *ses = NULL;
1813:
1814: if (sidp == NULL || cri == NULL)
1815: return (EINVAL);
1816:
1817: for (i = 0; i < hifn_cd.cd_ndevs; i++) {
1818: sc = hifn_cd.cd_devs[i];
1819: if (sc == NULL)
1820: break;
1821: if (sc->sc_cid == (*sidp))
1822: break;
1823: }
1824: if (sc == NULL)
1825: return (EINVAL);
1826:
1827: if (sc->sc_sessions == NULL) {
1828: ses = sc->sc_sessions = (struct hifn_session *)malloc(
1829: sizeof(*ses), M_DEVBUF, M_NOWAIT);
1830: if (ses == NULL)
1831: return (ENOMEM);
1832: sesn = 0;
1833: sc->sc_nsessions = 1;
1834: } else {
1835: for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
1836: if (!sc->sc_sessions[sesn].hs_used) {
1837: ses = &sc->sc_sessions[sesn];
1838: break;
1839: }
1840: }
1841:
1842: if (ses == NULL) {
1843: sesn = sc->sc_nsessions;
1844: ses = (struct hifn_session *)malloc((sesn + 1) *
1845: sizeof(*ses), M_DEVBUF, M_NOWAIT);
1846: if (ses == NULL)
1847: return (ENOMEM);
1848: bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
1849: bzero(sc->sc_sessions, sesn * sizeof(*ses));
1850: free(sc->sc_sessions, M_DEVBUF);
1851: sc->sc_sessions = ses;
1852: ses = &sc->sc_sessions[sesn];
1853: sc->sc_nsessions++;
1854: }
1855: }
1856: bzero(ses, sizeof(*ses));
1857:
1858: for (c = cri; c != NULL; c = c->cri_next) {
1859: switch (c->cri_alg) {
1860: case CRYPTO_MD5:
1861: case CRYPTO_SHA1:
1862: case CRYPTO_MD5_HMAC:
1863: case CRYPTO_SHA1_HMAC:
1864: if (mac)
1865: return (EINVAL);
1866: mac = 1;
1867: break;
1868: case CRYPTO_DES_CBC:
1869: case CRYPTO_3DES_CBC:
1870: case CRYPTO_AES_CBC:
1871: get_random_bytes(ses->hs_iv,
1872: (c->cri_alg == CRYPTO_AES_CBC ?
1873: HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH));
1874: /*FALLTHROUGH*/
1875: case CRYPTO_ARC4:
1876: if (cry)
1877: return (EINVAL);
1878: cry = 1;
1879: break;
1880: case CRYPTO_LZS_COMP:
1881: if (comp)
1882: return (EINVAL);
1883: comp = 1;
1884: break;
1885: default:
1886: return (EINVAL);
1887: }
1888: }
1889: if (mac == 0 && cry == 0 && comp == 0)
1890: return (EINVAL);
1891:
1892: /*
1893: * XXX only want to support compression without chaining to
1894: * MAC/crypt engine right now
1895: */
1896: if ((comp && mac) || (comp && cry))
1897: return (EINVAL);
1898:
1899: *sidp = HIFN_SID(sc->sc_dv.dv_unit, sesn);
1900: ses->hs_used = 1;
1901:
1902: return (0);
1903: }
1904:
1905: /*
1906: * Deallocate a session.
1907: * XXX this routine should run a zero'd mac/encrypt key into context ram.
1908: * XXX to blow away any keys already stored there.
1909: */
1910: int
1911: hifn_freesession(u_int64_t tid)
1912: {
1913: struct hifn_softc *sc;
1914: int card, session;
1915: u_int32_t sid = ((u_int32_t)tid) & 0xffffffff;
1916:
1917: card = HIFN_CARD(sid);
1918: if (card >= hifn_cd.cd_ndevs || hifn_cd.cd_devs[card] == NULL)
1919: return (EINVAL);
1920:
1921: sc = hifn_cd.cd_devs[card];
1922: session = HIFN_SESSION(sid);
1923: if (session >= sc->sc_nsessions)
1924: return (EINVAL);
1925:
1926: bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
1927: return (0);
1928: }
1929:
1930: int
1931: hifn_process(struct cryptop *crp)
1932: {
1933: struct hifn_command *cmd = NULL;
1934: int card, session, err, ivlen;
1935: struct hifn_softc *sc;
1936: struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
1937:
1938: if (crp == NULL || crp->crp_callback == NULL) {
1939: hifnstats.hst_invalid++;
1940: return (EINVAL);
1941: }
1942:
1943: if (crp->crp_ilen == 0) {
1944: err = EINVAL;
1945: goto errout;
1946: }
1947:
1948: card = HIFN_CARD(crp->crp_sid);
1949: if (card >= hifn_cd.cd_ndevs || hifn_cd.cd_devs[card] == NULL) {
1950: err = EINVAL;
1951: goto errout;
1952: }
1953:
1954: sc = hifn_cd.cd_devs[card];
1955: session = HIFN_SESSION(crp->crp_sid);
1956: if (session >= sc->sc_nsessions) {
1957: err = EINVAL;
1958: goto errout;
1959: }
1960:
1961: cmd = (struct hifn_command *)malloc(sizeof(struct hifn_command),
1962: M_DEVBUF, M_NOWAIT);
1963: if (cmd == NULL) {
1964: err = ENOMEM;
1965: goto errout;
1966: }
1967: bzero(cmd, sizeof(struct hifn_command));
1968:
1969: if (crp->crp_flags & CRYPTO_F_IMBUF) {
1970: cmd->srcu.src_m = (struct mbuf *)crp->crp_buf;
1971: cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf;
1972: } else if (crp->crp_flags & CRYPTO_F_IOV) {
1973: cmd->srcu.src_io = (struct uio *)crp->crp_buf;
1974: cmd->dstu.dst_io = (struct uio *)crp->crp_buf;
1975: } else {
1976: err = EINVAL;
1977: goto errout; /* XXX we don't handle contiguous buffers! */
1978: }
1979:
1980: crd1 = crp->crp_desc;
1981: if (crd1 == NULL) {
1982: err = EINVAL;
1983: goto errout;
1984: }
1985: crd2 = crd1->crd_next;
1986:
1987: if (crd2 == NULL) {
1988: if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
1989: crd1->crd_alg == CRYPTO_SHA1_HMAC ||
1990: crd1->crd_alg == CRYPTO_SHA1 ||
1991: crd1->crd_alg == CRYPTO_MD5) {
1992: maccrd = crd1;
1993: enccrd = NULL;
1994: } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
1995: crd1->crd_alg == CRYPTO_3DES_CBC ||
1996: crd1->crd_alg == CRYPTO_AES_CBC ||
1997: crd1->crd_alg == CRYPTO_ARC4) {
1998: if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
1999: cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2000: maccrd = NULL;
2001: enccrd = crd1;
2002: } else if (crd1->crd_alg == CRYPTO_LZS_COMP) {
2003: return (hifn_compression(sc, crp, cmd));
2004: } else {
2005: err = EINVAL;
2006: goto errout;
2007: }
2008: } else {
2009: if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
2010: crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2011: crd1->crd_alg == CRYPTO_MD5 ||
2012: crd1->crd_alg == CRYPTO_SHA1) &&
2013: (crd2->crd_alg == CRYPTO_DES_CBC ||
2014: crd2->crd_alg == CRYPTO_3DES_CBC ||
2015: crd2->crd_alg == CRYPTO_AES_CBC ||
2016: crd2->crd_alg == CRYPTO_ARC4) &&
2017: ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2018: cmd->base_masks = HIFN_BASE_CMD_DECODE;
2019: maccrd = crd1;
2020: enccrd = crd2;
2021: } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2022: crd1->crd_alg == CRYPTO_ARC4 ||
2023: crd1->crd_alg == CRYPTO_AES_CBC ||
2024: crd1->crd_alg == CRYPTO_3DES_CBC) &&
2025: (crd2->crd_alg == CRYPTO_MD5_HMAC ||
2026: crd2->crd_alg == CRYPTO_SHA1_HMAC ||
2027: crd2->crd_alg == CRYPTO_MD5 ||
2028: crd2->crd_alg == CRYPTO_SHA1) &&
2029: (crd1->crd_flags & CRD_F_ENCRYPT)) {
2030: enccrd = crd1;
2031: maccrd = crd2;
2032: } else {
2033: /*
2034: * We cannot order the 7751 as requested
2035: */
2036: err = EINVAL;
2037: goto errout;
2038: }
2039: }
2040:
2041: if (enccrd) {
2042: cmd->enccrd = enccrd;
2043: cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2044: switch (enccrd->crd_alg) {
2045: case CRYPTO_ARC4:
2046: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2047: break;
2048: case CRYPTO_DES_CBC:
2049: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2050: HIFN_CRYPT_CMD_MODE_CBC |
2051: HIFN_CRYPT_CMD_NEW_IV;
2052: break;
2053: case CRYPTO_3DES_CBC:
2054: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2055: HIFN_CRYPT_CMD_MODE_CBC |
2056: HIFN_CRYPT_CMD_NEW_IV;
2057: break;
2058: case CRYPTO_AES_CBC:
2059: cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2060: HIFN_CRYPT_CMD_MODE_CBC |
2061: HIFN_CRYPT_CMD_NEW_IV;
2062: break;
2063: default:
2064: err = EINVAL;
2065: goto errout;
2066: }
2067: if (enccrd->crd_alg != CRYPTO_ARC4) {
2068: ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
2069: HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2070: if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2071: if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2072: bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2073: else
2074: bcopy(sc->sc_sessions[session].hs_iv,
2075: cmd->iv, ivlen);
2076:
2077: if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2078: == 0) {
2079: if (crp->crp_flags & CRYPTO_F_IMBUF)
2080: m_copyback(cmd->srcu.src_m,
2081: enccrd->crd_inject,
2082: ivlen, cmd->iv);
2083: else if (crp->crp_flags & CRYPTO_F_IOV)
2084: cuio_copyback(cmd->srcu.src_io,
2085: enccrd->crd_inject,
2086: ivlen, cmd->iv);
2087: }
2088: } else {
2089: if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2090: bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2091: else if (crp->crp_flags & CRYPTO_F_IMBUF)
2092: m_copydata(cmd->srcu.src_m,
2093: enccrd->crd_inject,
2094: ivlen, cmd->iv);
2095: else if (crp->crp_flags & CRYPTO_F_IOV)
2096: cuio_copydata(cmd->srcu.src_io,
2097: enccrd->crd_inject,
2098: ivlen, cmd->iv);
2099: }
2100: }
2101:
2102: cmd->ck = enccrd->crd_key;
2103: cmd->cklen = enccrd->crd_klen >> 3;
2104: cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2105:
2106: /*
2107: * Need to specify the size for the AES key in the masks.
2108: */
2109: if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2110: HIFN_CRYPT_CMD_ALG_AES) {
2111: switch (cmd->cklen) {
2112: case 16:
2113: cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2114: break;
2115: case 24:
2116: cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2117: break;
2118: case 32:
2119: cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2120: break;
2121: default:
2122: err = EINVAL;
2123: goto errout;
2124: }
2125: }
2126: }
2127:
2128: if (maccrd) {
2129: cmd->maccrd = maccrd;
2130: cmd->base_masks |= HIFN_BASE_CMD_MAC;
2131:
2132: switch (maccrd->crd_alg) {
2133: case CRYPTO_MD5:
2134: cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2135: HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2136: HIFN_MAC_CMD_POS_IPSEC;
2137: break;
2138: case CRYPTO_MD5_HMAC:
2139: cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2140: HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2141: HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2142: break;
2143: case CRYPTO_SHA1:
2144: cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2145: HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2146: HIFN_MAC_CMD_POS_IPSEC;
2147: break;
2148: case CRYPTO_SHA1_HMAC:
2149: cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2150: HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2151: HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2152: break;
2153: }
2154:
2155: if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
2156: maccrd->crd_alg == CRYPTO_MD5_HMAC) {
2157: cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2158: bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
2159: bzero(cmd->mac + (maccrd->crd_klen >> 3),
2160: HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2161: }
2162: }
2163:
2164: cmd->crp = crp;
2165: cmd->session_num = session;
2166: cmd->softc = sc;
2167:
2168: err = hifn_crypto(sc, cmd, crp);
2169: if (!err)
2170: return 0;
2171:
2172: errout:
2173: if (cmd != NULL)
2174: free(cmd, M_DEVBUF);
2175: if (err == EINVAL)
2176: hifnstats.hst_invalid++;
2177: else
2178: hifnstats.hst_nomem++;
2179: crp->crp_etype = err;
2180: crypto_done(crp);
2181: return (0);
2182: }
2183:
/*
 * Abort every command still outstanding on the result ring, then reset
 * and reinitialize the board.  Descriptors the chip has already
 * completed (VALID bit clear) are salvaged through their normal
 * completion callback; the rest are failed with ENOMEM and their DMA
 * resources torn down.
 */
void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	/* Walk the result ring from the oldest outstanding entry. */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			hifnstats.hst_opackets++;
			(*cmd->cmd_callback)(sc, cmd, dma->result_bufs[i]);
		} else {
			/*
			 * Descriptor still owned by the chip: sync the
			 * maps back to the CPU and fail the request.
			 */
			if (cmd->src_map == cmd->dst_map)
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    0, cmd->dst_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
			}

			/*
			 * If a separate output chain was allocated, free
			 * the source and hand the destination back to the
			 * crp so the caller owns a consistent buffer.
			 */
			if (cmd->srcu.src_m != cmd->dstu.dst_m) {
				m_freem(cmd->srcu.src_m);
				crp->crp_buf = (caddr_t)cmd->dstu.dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		/* Advance around the ring. */
		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Bring the chip back to a known-good state. */
	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2250:
/*
 * Completion callback for crypto (non-compression) commands.  Syncs
 * the DMA maps, swaps in the output mbuf chain if one was allocated,
 * copies back any "slop" bytes staged in the shared DMA area, reclaims
 * completed destination descriptors, saves the next IV for CBC modes,
 * copies the MAC result back to the caller, releases DMA resources and
 * completes the request.
 */
void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd,
    u_int8_t *resbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->srcu.src_m != cmd->dstu.dst_m) {
			/*
			 * A separate output chain was used: trim it to
			 * the source length and hand it to the crp,
			 * freeing the source chain.
			 */
			crp->crp_buf = (caddr_t)cmd->dstu.dst_m;
			totlen = cmd->src_map->dm_mapsize;
			for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dstu.dst_m->m_pkthdr.len =
			    cmd->srcu.src_m->m_pkthdr.len;
			m_freem(cmd->srcu.src_m);
		}
	}

	/*
	 * Trailing bytes that didn't fit a descriptor were staged in
	 * the shared slop area; copy them back into the caller's buffer.
	 */
	if (cmd->sloplen != 0) {
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, &dma->slop[cmd->slopidx]);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, &dma->slop[cmd->slopidx]);
	}

	/* Reclaim destination descriptors the chip has finished with. */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			/* Still owned by the chip; re-arm and stop. */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    offsetof(struct hifn_dma, dstr[i]),
			    sizeof(struct hifn_desc),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		/* Slot HIFN_D_DST_RSIZE is the jump descriptor. */
		if (++i == (HIFN_D_DST_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_map->dm_mapsize;

	/*
	 * For encryption, save the last cipher block as the IV for the
	 * next request on this session (CBC chaining across requests).
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
			    HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen, ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen, ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			}
			break;
		}
	}

	if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
		u_int8_t *macbuf;

		/* The MAC lives after the base (and optional comp) results. */
		macbuf = resbuf + sizeof(struct hifn_base_result);
		if (cmd->base_masks & HIFN_BASE_CMD_COMP)
			macbuf += sizeof(struct hifn_comp_result);
		macbuf += sizeof(struct hifn_mac_result);

		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			/* Digest lengths: MD5 16, SHA1 20, truncated HMAC 12. */
			if (crd->crd_alg == CRYPTO_MD5)
				len = 16;
			else if (crd->crd_alg == CRYPTO_SHA1)
				len = 20;
			else if (crd->crd_alg == CRYPTO_MD5_HMAC ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC)
				len = 12;
			else
				continue;

			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, len, macbuf);
			else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
				bcopy((caddr_t)macbuf, crp->crp_mac, len);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2382:
2383: int
2384: hifn_compression(struct hifn_softc *sc, struct cryptop *crp,
2385: struct hifn_command *cmd)
2386: {
2387: struct cryptodesc *crd = crp->crp_desc;
2388: int s, err = 0;
2389:
2390: cmd->compcrd = crd;
2391: cmd->base_masks |= HIFN_BASE_CMD_COMP;
2392:
2393: if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) {
2394: /*
2395: * XXX can only handle mbufs right now since we can
2396: * XXX dynamically resize them.
2397: */
2398: err = EINVAL;
2399: return (ENOMEM);
2400: }
2401:
2402: if ((crd->crd_flags & CRD_F_COMP) == 0)
2403: cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2404: if (crd->crd_alg == CRYPTO_LZS_COMP)
2405: cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS |
2406: HIFN_COMP_CMD_CLEARHIST;
2407:
2408: if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2409: HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) {
2410: err = ENOMEM;
2411: goto fail;
2412: }
2413:
2414: if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2415: HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
2416: err = ENOMEM;
2417: goto fail;
2418: }
2419:
2420: if (crp->crp_flags & CRYPTO_F_IMBUF) {
2421: int len;
2422:
2423: if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
2424: cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
2425: err = ENOMEM;
2426: goto fail;
2427: }
2428:
2429: len = cmd->src_map->dm_mapsize / MCLBYTES;
2430: if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0)
2431: len++;
2432: len *= MCLBYTES;
2433:
2434: if ((crd->crd_flags & CRD_F_COMP) == 0)
2435: len *= 4;
2436:
2437: if (len > HIFN_MAX_DMALEN)
2438: len = HIFN_MAX_DMALEN;
2439:
2440: cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m);
2441: if (cmd->dstu.dst_m == NULL) {
2442: err = ENOMEM;
2443: goto fail;
2444: }
2445:
2446: if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2447: cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2448: err = ENOMEM;
2449: goto fail;
2450: }
2451: } else if (crp->crp_flags & CRYPTO_F_IOV) {
2452: if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
2453: cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
2454: err = ENOMEM;
2455: goto fail;
2456: }
2457: if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
2458: cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
2459: err = ENOMEM;
2460: goto fail;
2461: }
2462: }
2463:
2464: if (cmd->src_map == cmd->dst_map)
2465: bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2466: 0, cmd->src_map->dm_mapsize,
2467: BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2468: else {
2469: bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2470: 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2471: bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2472: 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2473: }
2474:
2475: cmd->crp = crp;
2476: /*
2477: * Always use session 0. The modes of compression we use are
2478: * stateless and there is always at least one compression
2479: * context, zero.
2480: */
2481: cmd->session_num = 0;
2482: cmd->softc = sc;
2483:
2484: s = splnet();
2485: err = hifn_compress_enter(sc, cmd);
2486: splx(s);
2487:
2488: if (err != 0)
2489: goto fail;
2490: return (0);
2491:
2492: fail:
2493: if (cmd->dst_map != NULL) {
2494: if (cmd->dst_map->dm_nsegs > 0)
2495: bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2496: bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2497: }
2498: if (cmd->src_map != NULL) {
2499: if (cmd->src_map->dm_nsegs > 0)
2500: bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2501: bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2502: }
2503: free(cmd, M_DEVBUF);
2504: if (err == EINVAL)
2505: hifnstats.hst_invalid++;
2506: else
2507: hifnstats.hst_nomem++;
2508: crp->crp_etype = err;
2509: crypto_done(crp);
2510: return (0);
2511: }
2512:
2513: /*
2514: * must be called at splnet()
2515: */
2516: int
2517: hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd)
2518: {
2519: struct hifn_dma *dma = sc->sc_dma;
2520: int cmdi, resi;
2521: u_int32_t cmdlen;
2522:
2523: if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
2524: (dma->resu + 1) > HIFN_D_CMD_RSIZE)
2525: return (ENOMEM);
2526:
2527: if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
2528: (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE)
2529: return (ENOMEM);
2530:
2531: if (dma->cmdi == HIFN_D_CMD_RSIZE) {
2532: dma->cmdi = 0;
2533: dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2534: HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2535: HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2536: BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2537: }
2538: cmdi = dma->cmdi++;
2539: cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2540: HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2541:
2542: /* .p for command/result already set */
2543: dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2544: HIFN_D_MASKDONEIRQ);
2545: HIFN_CMDR_SYNC(sc, cmdi,
2546: BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2547: dma->cmdu++;
2548: if (sc->sc_c_busy == 0) {
2549: WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
2550: sc->sc_c_busy = 1;
2551: SET_LED(sc, HIFN_MIPSRST_LED0);
2552: }
2553:
2554: /*
2555: * Always enable the command wait interrupt. We are obviously
2556: * missing an interrupt or two somewhere. Enabling the command wait
2557: * interrupt will guarantee we get called periodically until all
2558: * of the queues are drained and thus work around this.
2559: */
2560: sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2561: WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2562:
2563: hifnstats.hst_ipackets++;
2564: hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;
2565:
2566: hifn_dmamap_load_src(sc, cmd);
2567: if (sc->sc_s_busy == 0) {
2568: WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
2569: sc->sc_s_busy = 1;
2570: SET_LED(sc, HIFN_MIPSRST_LED1);
2571: }
2572:
2573: /*
2574: * Unlike other descriptors, we don't mask done interrupt from
2575: * result descriptor.
2576: */
2577: if (dma->resi == HIFN_D_RES_RSIZE) {
2578: dma->resi = 0;
2579: dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2580: HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2581: HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2582: BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2583: }
2584: resi = dma->resi++;
2585: dma->hifn_commands[resi] = cmd;
2586: HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2587: dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2588: HIFN_D_VALID | HIFN_D_LAST);
2589: HIFN_RESR_SYNC(sc, resi,
2590: BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2591: dma->resu++;
2592: if (sc->sc_r_busy == 0) {
2593: WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
2594: sc->sc_r_busy = 1;
2595: SET_LED(sc, HIFN_MIPSRST_LED2);
2596: }
2597:
2598: if (cmd->sloplen)
2599: cmd->slopidx = resi;
2600:
2601: hifn_dmamap_load_dst(sc, cmd);
2602:
2603: if (sc->sc_d_busy == 0) {
2604: WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
2605: sc->sc_d_busy = 1;
2606: }
2607: sc->sc_active = 5;
2608: cmd->cmd_callback = hifn_callback_comp;
2609: return (0);
2610: }
2611:
2612: void
2613: hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd,
2614: u_int8_t *resbuf)
2615: {
2616: struct hifn_base_result baseres;
2617: struct cryptop *crp = cmd->crp;
2618: struct hifn_dma *dma = sc->sc_dma;
2619: struct mbuf *m;
2620: int err = 0, i, u;
2621: u_int32_t olen;
2622: bus_size_t dstsize;
2623:
2624: bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2625: 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2626: bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2627: 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2628:
2629: dstsize = cmd->dst_map->dm_mapsize;
2630: bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2631:
2632: bcopy(resbuf, &baseres, sizeof(struct hifn_base_result));
2633:
2634: i = dma->dstk; u = dma->dstu;
2635: while (u != 0) {
2636: bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2637: offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
2638: BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2639: if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2640: bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2641: offsetof(struct hifn_dma, dstr[i]),
2642: sizeof(struct hifn_desc),
2643: BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2644: break;
2645: }
2646: if (++i == (HIFN_D_DST_RSIZE + 1))
2647: i = 0;
2648: else
2649: u--;
2650: }
2651: dma->dstk = i; dma->dstu = u;
2652:
2653: if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) {
2654: bus_size_t xlen;
2655:
2656: xlen = dstsize;
2657:
2658: m_freem(cmd->dstu.dst_m);
2659:
2660: if (xlen == HIFN_MAX_DMALEN) {
2661: /* We've done all we can. */
2662: err = E2BIG;
2663: goto out;
2664: }
2665:
2666: xlen += MCLBYTES;
2667:
2668: if (xlen > HIFN_MAX_DMALEN)
2669: xlen = HIFN_MAX_DMALEN;
2670:
2671: cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen,
2672: cmd->srcu.src_m);
2673: if (cmd->dstu.dst_m == NULL) {
2674: err = ENOMEM;
2675: goto out;
2676: }
2677: if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2678: cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2679: err = ENOMEM;
2680: goto out;
2681: }
2682:
2683: bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2684: 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2685: bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2686: 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2687:
2688: /* already at splnet... */
2689: err = hifn_compress_enter(sc, cmd);
2690: if (err != 0)
2691: goto out;
2692: return;
2693: }
2694:
2695: olen = dstsize - (letoh16(baseres.dst_cnt) |
2696: (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >>
2697: HIFN_BASE_RES_DSTLEN_S) << 16));
2698:
2699: crp->crp_olen = olen - cmd->compcrd->crd_skip;
2700:
2701: bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2702: bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2703: bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2704:
2705: m = cmd->dstu.dst_m;
2706: if (m->m_flags & M_PKTHDR)
2707: m->m_pkthdr.len = olen;
2708: crp->crp_buf = (caddr_t)m;
2709: for (; m != NULL; m = m->m_next) {
2710: if (olen >= m->m_len)
2711: olen -= m->m_len;
2712: else {
2713: m->m_len = olen;
2714: olen = 0;
2715: }
2716: }
2717:
2718: m_freem(cmd->srcu.src_m);
2719: free(cmd, M_DEVBUF);
2720: crp->crp_etype = 0;
2721: crypto_done(crp);
2722: return;
2723:
2724: out:
2725: if (cmd->dst_map != NULL) {
2726: if (cmd->src_map->dm_nsegs != 0)
2727: bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2728: bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2729: }
2730: if (cmd->src_map != NULL) {
2731: if (cmd->src_map->dm_nsegs != 0)
2732: bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2733: bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2734: }
2735: if (cmd->dstu.dst_m != NULL)
2736: m_freem(cmd->dstu.dst_m);
2737: free(cmd, M_DEVBUF);
2738: crp->crp_etype = err;
2739: crypto_done(crp);
2740: }
2741:
2742: struct mbuf *
2743: hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate)
2744: {
2745: int len;
2746: struct mbuf *m, *m0, *mlast;
2747:
2748: if (mtemplate->m_flags & M_PKTHDR) {
2749: len = MHLEN;
2750: MGETHDR(m0, M_DONTWAIT, MT_DATA);
2751: } else {
2752: len = MLEN;
2753: MGET(m0, M_DONTWAIT, MT_DATA);
2754: }
2755: if (m0 == NULL)
2756: return (NULL);
2757: if (len == MHLEN)
2758: M_DUP_PKTHDR(m0, mtemplate);
2759: MCLGET(m0, M_DONTWAIT);
2760: if (!(m0->m_flags & M_EXT))
2761: m_freem(m0);
2762: len = MCLBYTES;
2763:
2764: totlen -= len;
2765: m0->m_pkthdr.len = m0->m_len = len;
2766: mlast = m0;
2767:
2768: while (totlen > 0) {
2769: MGET(m, M_DONTWAIT, MT_DATA);
2770: if (m == NULL) {
2771: m_freem(m0);
2772: return (NULL);
2773: }
2774: MCLGET(m, M_DONTWAIT);
2775: if (!(m->m_flags & M_EXT)) {
2776: m_freem(m0);
2777: return (NULL);
2778: }
2779: len = MCLBYTES;
2780: m->m_len = len;
2781: if (m0->m_flags & M_PKTHDR)
2782: m0->m_pkthdr.len += len;
2783: totlen -= len;
2784:
2785: mlast->m_next = m;
2786: mlast = m;
2787: }
2788:
2789: return (m0);
2790: }
2791:
2792: void
2793: hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg,
2794: u_int32_t val)
2795: {
2796: /*
2797: * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
2798: * and Group 1 registers; avoid conditions that could create
2799: * burst writes by doing a read in between the writes.
2800: */
2801: if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
2802: if (sc->sc_waw_lastgroup == reggrp &&
2803: sc->sc_waw_lastreg == reg - 4) {
2804: bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
2805: }
2806: sc->sc_waw_lastgroup = reggrp;
2807: sc->sc_waw_lastreg = reg;
2808: }
2809: if (reggrp == 0)
2810: bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
2811: else
2812: bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
2813:
2814: }
2815:
2816: u_int32_t
2817: hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg)
2818: {
2819: if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
2820: sc->sc_waw_lastgroup = -1;
2821: sc->sc_waw_lastreg = 1;
2822: }
2823: if (reggrp == 0)
2824: return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg));
2825: return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg));
2826: }
CVSweb