Annotation of sys/dev/pci/noct.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: noct.c,v 1.17 2006/06/29 21:34:51 deraadt Exp $ */
2:
3: /*
4: * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
5: * All rights reserved.
6: *
7: * Redistribution and use in source and binary forms, with or without
8: * modification, are permitted provided that the following conditions
9: * are met:
10: * 1. Redistributions of source code must retain the above copyright
11: * notice, this list of conditions and the following disclaimer.
12: * 2. Redistributions in binary form must reproduce the above copyright
13: * notice, this list of conditions and the following disclaimer in the
14: * documentation and/or other materials provided with the distribution.
15: *
16: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18: * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19: * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20: * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21: * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22: * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24: * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
25: * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26: * POSSIBILITY OF SUCH DAMAGE.
27: *
28: * Effort sponsored in part by the Defense Advanced Research Projects
29: * Agency (DARPA) and Air Force Research Laboratory, Air Force
30: * Materiel Command, USAF, under agreement number F30602-01-2-0537.
31: *
32: */
33:
34: /*
35: * Driver for the Netoctave NSP2000 security processor.
36: */
37:
38: #include <sys/param.h>
39: #include <sys/systm.h>
40: #include <sys/proc.h>
41: #include <sys/errno.h>
42: #include <sys/malloc.h>
43: #include <sys/kernel.h>
44: #include <sys/mbuf.h>
45: #include <sys/device.h>
46: #include <sys/extent.h>
47: #include <sys/kthread.h>
48:
49: #include <crypto/cryptodev.h>
50: #include <dev/rndvar.h>
51:
52: #include <dev/pci/pcireg.h>
53: #include <dev/pci/pcivar.h>
54: #include <dev/pci/pcidevs.h>
55:
56: #include <dev/pci/noctreg.h>
57: #include <dev/pci/noctvar.h>
58:
59: int noct_probe(struct device *, void *, void *);
60: void noct_attach(struct device *, struct device *, void *);
61: int noct_intr(void *);
62:
63: int noct_ram_size(struct noct_softc *);
64: void noct_ram_write(struct noct_softc *, u_int32_t, u_int64_t);
65: u_int64_t noct_ram_read(struct noct_softc *, u_int32_t);
66:
67: void noct_rng_enable(struct noct_softc *);
68: void noct_rng_disable(struct noct_softc *);
69: void noct_rng_init(struct noct_softc *);
70: void noct_rng_intr(struct noct_softc *);
71: void noct_rng_tick(void *);
72:
73: void noct_pkh_enable(struct noct_softc *);
74: void noct_pkh_disable(struct noct_softc *);
75: void noct_pkh_init(struct noct_softc *);
76: void noct_pkh_intr(struct noct_softc *);
77: void noct_pkh_freedesc(struct noct_softc *, int);
78: u_int32_t noct_pkh_nfree(struct noct_softc *);
79: int noct_kload(struct noct_softc *, struct crparam *, u_int32_t);
80: void noct_kload_cb(struct noct_softc *, u_int32_t, int);
81: void noct_modmul_cb(struct noct_softc *, u_int32_t, int);
82:
83: void noct_ea_enable(struct noct_softc *);
84: void noct_ea_disable(struct noct_softc *);
85: void noct_ea_init(struct noct_softc *);
86: void noct_ea_intr(struct noct_softc *);
87: void noct_ea_create_thread(void *);
88: void noct_ea_thread(void *);
89: u_int32_t noct_ea_nfree(struct noct_softc *);
90: void noct_ea_start(struct noct_softc *, struct noct_workq *);
91: void noct_ea_start_hash(struct noct_softc *, struct noct_workq *,
92: struct cryptop *, struct cryptodesc *);
93: void noct_ea_start_des(struct noct_softc *, struct noct_workq *,
94: struct cryptop *, struct cryptodesc *);
95: int noct_newsession(u_int32_t *, struct cryptoini *);
96: int noct_freesession(u_int64_t);
97: int noct_process(struct cryptop *);
98:
99: u_int32_t noct_read_4(struct noct_softc *, bus_size_t);
100: void noct_write_4(struct noct_softc *, bus_size_t, u_int32_t);
101: u_int64_t noct_read_8(struct noct_softc *, u_int32_t);
102: void noct_write_8(struct noct_softc *, u_int32_t, u_int64_t);
103:
104: struct noct_softc *noct_kfind(struct cryptkop *);
105: int noct_ksigbits(struct crparam *);
106: int noct_kprocess(struct cryptkop *);
107: int noct_kprocess_modexp(struct noct_softc *, struct cryptkop *);
108:
/* Autoconf glue: attachment descriptor and driver definition. */
struct cfattach noct_ca = {
	sizeof(struct noct_softc), noct_probe, noct_attach,
};

struct cfdriver noct_cd = {
	0, "noct", DV_DULL
};

/* Convert a word from network (big endian) order to little endian in place. */
#define	SWAP32(x)	(x) = htole32(ntohl((x)))
118:
119: int
120: noct_probe(parent, match, aux)
121: struct device *parent;
122: void *match;
123: void *aux;
124: {
125: struct pci_attach_args *pa = (struct pci_attach_args *) aux;
126:
127: if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETOCTAVE &&
128: PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETOCTAVE_NSP2K)
129: return (1);
130: return (0);
131: }
132:
/*
 * Attach the NSP2000: map BAR0, force the bridge into little endian
 * mode, register with the crypto framework, establish the interrupt,
 * probe the external RAM size and bring up the RNG, PK and EA units.
 */
void
noct_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct noct_softc *sc = (struct noct_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_size_t iosize = 0;

	if (pci_mapreg_map(pa, NOCT_BAR0, PCI_MAPREG_MEM_TYPE_64BIT, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, &iosize, 0)) {
		printf(": can't map mem space\n");
		goto fail;
	}

	/* Before we do anything else, put the chip in little endian mode */
	NOCT_WRITE_4(sc, NOCT_BRDG_ENDIAN, 0);
	/* presumably "no cached last address" sentinels -- see reg access */
	sc->sc_rar_last = 0xffffffff;
	sc->sc_waw_last = 0xffffffff;
	sc->sc_dmat = pa->pa_dmat;

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		printf(": couldn't register cid\n");
		goto fail;
	}

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, noct_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}

	if (noct_ram_size(sc))
		goto fail;

	printf(":");

	/* Each unit init announces itself (" RNG", " PK", " MD5 ..."). */
	noct_rng_init(sc);
	noct_pkh_init(sc);
	noct_ea_init(sc);

	printf(", %uMB, %s\n", sc->sc_ramsize, intrstr);

	return;

fail:
	/*
	 * NOTE(review): the established interrupt handler and crypto
	 * driver id are not torn down on the later failure paths --
	 * confirm whether that is an acceptable leak at attach time.
	 */
	if (iosize != 0)
		bus_space_unmap(sc->sc_st, sc->sc_sh, iosize);
}
195:
196: int
197: noct_intr(vsc)
198: void *vsc;
199: {
200: struct noct_softc *sc = vsc;
201: u_int32_t reg;
202: int r = 0;
203:
204: reg = NOCT_READ_4(sc, NOCT_BRDG_STAT);
205:
206: if (reg & BRDGSTS_RNG_INT) {
207: r = 1;
208: noct_rng_intr(sc);
209: }
210:
211: if (reg & BRDGSTS_PKP_INT) {
212: r = 1;
213: noct_pkh_intr(sc);
214: }
215:
216: if (reg & BRDGSTS_CCH_INT) {
217: r = 1;
218: noct_ea_intr(sc);
219: }
220:
221: return (r);
222: }
223:
/*
 * Determine the external RAM size.  Distinct marker values are
 * written at offsets that presumably alias offset 0 on smaller
 * parts, so the value read back at offset 0 names the size in MB
 * (32, 64 or 128).  Returns 0 on success, 1 on an implausible size.
 */
int
noct_ram_size(sc)
	struct noct_softc *sc;
{
	u_int64_t t;

	noct_ram_write(sc, 0x000000, 64);
	noct_ram_write(sc, 0x400000, 32);	/* aliases 0 on 32MB parts */
	t = noct_ram_read(sc, 0x000000);
	noct_ram_write(sc, 0x000000, 128);
	noct_ram_write(sc, 0x800000, t);	/* aliases 0 on <= 64MB parts */
	t = noct_ram_read(sc, 0x000000);

	if (t != 32 && t != 64 && t != 128) {
		printf(": invalid ram size %llx\n", (unsigned long long)t);
		return (1);
	}

	sc->sc_ramsize = t;
	return (0);
}
245:
/*
 * Write one 64-bit word of external RAM at offset `adr' through the
 * EA context access window.  Busy-waits both before (for earlier
 * writes) and after (for this write) on the WRITEPEND bit.
 */
void
noct_ram_write(sc, adr, dat)
	struct noct_softc *sc;
	u_int32_t adr;
	u_int64_t dat;
{
	u_int32_t reg;

	/* wait for pending writes to finish */
	for (;;) {
		reg = NOCT_READ_4(sc, NOCT_EA_CTX_ADDR);
		if ((reg & EACTXADDR_WRITEPEND) == 0)
			break;
	}

	NOCT_WRITE_4(sc, NOCT_EA_CTX_ADDR, adr);
	NOCT_WRITE_4(sc, NOCT_EA_CTX_DAT_1, (dat >> 32) & 0xffffffff);
	/* writing DAT_0 last presumably triggers the actual RAM write */
	NOCT_WRITE_4(sc, NOCT_EA_CTX_DAT_0, (dat >> 0) & 0xffffffff);

	/* wait for this write to complete */
	for (;;) {
		reg = NOCT_READ_4(sc, NOCT_EA_CTX_ADDR);
		if ((reg & EACTXADDR_WRITEPEND) == 0)
			break;
	}
}
271:
/*
 * Read one 64-bit word of external RAM at offset `adr' through the
 * EA context access window.  A read is started by writing the
 * address with READPEND set; completion is polled on the same bit,
 * then the two 32-bit data halves are assembled.
 */
u_int64_t
noct_ram_read(sc, adr)
	struct noct_softc *sc;
	u_int32_t adr;
{
	u_int64_t dat;
	u_int32_t reg;

	/* wait for pending reads to finish */
	for (;;) {
		reg = NOCT_READ_4(sc, NOCT_EA_CTX_ADDR);
		if ((reg & EACTXADDR_READPEND) == 0)
			break;
	}

	/* kick off the read */
	NOCT_WRITE_4(sc, NOCT_EA_CTX_ADDR, adr | EACTXADDR_READPEND);

	/* wait for it to complete */
	for (;;) {
		reg = NOCT_READ_4(sc, NOCT_EA_CTX_ADDR);
		if ((reg & EACTXADDR_READPEND) == 0)
			break;
	}

	dat = NOCT_READ_4(sc, NOCT_EA_CTX_DAT_1);
	dat <<= 32;
	dat |= NOCT_READ_4(sc, NOCT_EA_CTX_DAT_0);
	return (dat);
}
300:
/*
 * Quiesce the public key unit: mask its bridge irq and unit-level
 * interrupt enables, stop the unit, spin until it goes idle, then
 * clear any latched status bits.
 */
void
noct_pkh_disable(sc)
	struct noct_softc *sc;
{
	u_int32_t r;

	/* Turn off PK irq */
	NOCT_WRITE_4(sc, NOCT_BRDG_CTL,
	    NOCT_READ_4(sc, NOCT_BRDG_CTL) & ~(BRDGCTL_PKIRQ_ENA));

	/* Turn off PK interrupts */
	r = NOCT_READ_4(sc, NOCT_PKH_IER);
	r &= ~(PKHIER_CMDSI | PKHIER_SKSWR | PKHIER_SKSOFF | PKHIER_PKHLEN |
	    PKHIER_PKHOPCODE | PKHIER_BADQBASE | PKHIER_LOADERR |
	    PKHIER_STOREERR | PKHIER_CMDERR | PKHIER_ILL | PKHIER_PKERESV |
	    PKHIER_PKEWDT | PKHIER_PKENOTPRIME |
	    PKHIER_PKE_B | PKHIER_PKE_A | PKHIER_PKE_M | PKHIER_PKE_R |
	    PKHIER_PKEOPCODE);
	NOCT_WRITE_4(sc, NOCT_PKH_IER, r);

	/* Disable PK unit */
	r = NOCT_READ_4(sc, NOCT_PKH_CSR);
	r &= ~PKHCSR_PKH_ENA;
	NOCT_WRITE_4(sc, NOCT_PKH_CSR, r);
	/* spin until the unit drains whatever it was doing */
	for (;;) {
		r = NOCT_READ_4(sc, NOCT_PKH_CSR);
		if ((r & PKHCSR_PKH_BUSY) == 0)
			break;
	}

	/* Clear status bits (presumably write-1-to-clear semantics) */
	r |= PKHCSR_CMDSI | PKHCSR_SKSWR | PKHCSR_SKSOFF | PKHCSR_PKHLEN |
	    PKHCSR_PKHOPCODE | PKHCSR_BADQBASE | PKHCSR_LOADERR |
	    PKHCSR_STOREERR | PKHCSR_CMDERR | PKHCSR_ILL | PKHCSR_PKERESV |
	    PKHCSR_PKEWDT | PKHCSR_PKENOTPRIME |
	    PKHCSR_PKE_B | PKHCSR_PKE_A | PKHCSR_PKE_M | PKHCSR_PKE_R |
	    PKHCSR_PKEOPCODE;
	NOCT_WRITE_4(sc, NOCT_PKH_CSR, r);
}
340:
/*
 * Bring the public key unit online: reset the software ring
 * pointers, program the DMA command queue base/length, unmask all
 * unit-level interrupts, enable the unit and finally its bridge irq.
 */
void
noct_pkh_enable(sc)
	struct noct_softc *sc;
{
	u_int64_t adr;

	sc->sc_pkhwp = 0;
	sc->sc_pkhrp = 0;

	adr = sc->sc_pkhmap->dm_segs[0].ds_addr;
	/* HI and LEN first; the LO write presumably latches the base */
	NOCT_WRITE_4(sc, NOCT_PKH_Q_BASE_HI, (adr >> 32) & 0xffffffff);
	NOCT_WRITE_4(sc, NOCT_PKH_Q_LEN, NOCT_PKH_QLEN);
	NOCT_WRITE_4(sc, NOCT_PKH_Q_BASE_LO, (adr >> 0) & 0xffffffff);

	NOCT_WRITE_4(sc, NOCT_PKH_IER,
	    PKHIER_CMDSI | PKHIER_SKSWR | PKHIER_SKSOFF | PKHIER_PKHLEN |
	    PKHIER_PKHOPCODE | PKHIER_BADQBASE | PKHIER_LOADERR |
	    PKHIER_STOREERR | PKHIER_CMDERR | PKHIER_ILL | PKHIER_PKERESV |
	    PKHIER_PKEWDT | PKHIER_PKENOTPRIME |
	    PKHIER_PKE_B | PKHIER_PKE_A | PKHIER_PKE_M | PKHIER_PKE_R |
	    PKHIER_PKEOPCODE);

	NOCT_WRITE_4(sc, NOCT_PKH_CSR,
	    NOCT_READ_4(sc, NOCT_PKH_CSR) | PKHCSR_PKH_ENA);

	NOCT_WRITE_4(sc, NOCT_BRDG_CTL,
	    NOCT_READ_4(sc, NOCT_BRDG_CTL) | BRDGCTL_PKIRQ_ENA);
}
369:
370: void
371: noct_pkh_init(sc)
372: struct noct_softc *sc;
373: {
374: bus_dma_segment_t seg, bnseg;
375: int rseg, bnrseg;
376:
377: sc->sc_pkh_bn = extent_create("noctbn", 0, 255, M_DEVBUF,
378: NULL, NULL, EX_NOWAIT | EX_NOCOALESCE);
379: if (sc->sc_pkh_bn == NULL) {
380: printf("%s: failed pkh bn extent\n", sc->sc_dv.dv_xname);
381: goto fail;
382: }
383:
384: if (bus_dmamem_alloc(sc->sc_dmat, NOCT_PKH_BUFSIZE,
385: PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
386: printf("%s: failed pkh buf alloc\n", sc->sc_dv.dv_xname);
387: goto fail;
388: }
389: if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, NOCT_PKH_BUFSIZE,
390: (caddr_t *)&sc->sc_pkhcmd, BUS_DMA_NOWAIT)) {
391: printf("%s: failed pkh buf map\n", sc->sc_dv.dv_xname);
392: goto fail_1;
393: }
394: if (bus_dmamap_create(sc->sc_dmat, NOCT_PKH_BUFSIZE, rseg,
395: NOCT_PKH_BUFSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_pkhmap)) {
396: printf("%s: failed pkh map create\n", sc->sc_dv.dv_xname);
397: goto fail_2;
398: }
399: if (bus_dmamap_load_raw(sc->sc_dmat, sc->sc_pkhmap,
400: &seg, rseg, NOCT_PKH_BUFSIZE, BUS_DMA_NOWAIT)) {
401: printf("%s: failed pkh buf load\n", sc->sc_dv.dv_xname);
402: goto fail_3;
403: }
404:
405: /*
406: * Allocate shadow big number cache.
407: */
408: if (bus_dmamem_alloc(sc->sc_dmat, NOCT_BN_CACHE_SIZE, PAGE_SIZE, 0,
409: &bnseg, 1, &bnrseg, BUS_DMA_NOWAIT)) {
410: printf("%s: failed bnc buf alloc\n", sc->sc_dv.dv_xname);
411: goto fail_4;
412: }
413: if (bus_dmamem_map(sc->sc_dmat, &bnseg, bnrseg, NOCT_BN_CACHE_SIZE,
414: (caddr_t *)&sc->sc_bncache, BUS_DMA_NOWAIT)) {
415: printf("%s: failed bnc buf map\n", sc->sc_dv.dv_xname);
416: goto fail_5;
417: }
418: if (bus_dmamap_create(sc->sc_dmat, NOCT_BN_CACHE_SIZE, bnrseg,
419: NOCT_BN_CACHE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_bnmap)) {
420: printf("%s: failed bnc map create\n", sc->sc_dv.dv_xname);
421: goto fail_6;
422: }
423: if (bus_dmamap_load_raw(sc->sc_dmat, sc->sc_bnmap,
424: &bnseg, bnrseg, NOCT_BN_CACHE_SIZE, BUS_DMA_NOWAIT)) {
425: printf("%s: failed bnc buf load\n", sc->sc_dv.dv_xname);
426: goto fail_7;
427: }
428:
429: noct_pkh_disable(sc);
430: noct_pkh_enable(sc);
431:
432: #if 0
433: /*
434: * XXX MODEXP is implemented as MODMUL for debugging, don't
435: * XXX actually register.
436: */
437: crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0, noct_kprocess);
438: printf(" PK");
439: #endif
440:
441: return;
442:
443: fail_7:
444: bus_dmamap_destroy(sc->sc_dmat, sc->sc_bnmap);
445: fail_6:
446: bus_dmamem_unmap(sc->sc_dmat,
447: (caddr_t)sc->sc_pkhcmd, NOCT_PKH_BUFSIZE);
448: fail_5:
449: bus_dmamem_free(sc->sc_dmat, &bnseg, bnrseg);
450: fail_4:
451: bus_dmamap_unload(sc->sc_dmat, sc->sc_pkhmap);
452: fail_3:
453: bus_dmamap_destroy(sc->sc_dmat, sc->sc_pkhmap);
454: fail_2:
455: bus_dmamem_unmap(sc->sc_dmat,
456: (caddr_t)sc->sc_pkhcmd, NOCT_PKH_BUFSIZE);
457: fail_1:
458: bus_dmamem_free(sc->sc_dmat, &seg, rseg);
459: fail:
460: if (sc->sc_pkh_bn != NULL) {
461: extent_destroy(sc->sc_pkh_bn);
462: sc->sc_pkh_bn = NULL;
463: }
464: sc->sc_pkhcmd = NULL;
465: sc->sc_pkhmap = NULL;
466: }
467:
/*
 * Public key unit interrupt: acknowledge all latched status bits,
 * invoke the completion callback of every queue entry the chip has
 * consumed (software read pointer catching up to the hardware one),
 * then report any error conditions the unit flagged.
 */
void
noct_pkh_intr(sc)
	struct noct_softc *sc;
{
	u_int32_t csr;
	u_int32_t rp;

	csr = NOCT_READ_4(sc, NOCT_PKH_CSR);
	/* ack everything at once (presumably write-1-to-clear) */
	NOCT_WRITE_4(sc, NOCT_PKH_CSR, csr |
	    PKHCSR_CMDSI | PKHCSR_SKSWR | PKHCSR_SKSOFF | PKHCSR_PKHLEN |
	    PKHCSR_PKHOPCODE | PKHCSR_BADQBASE | PKHCSR_LOADERR |
	    PKHCSR_STOREERR | PKHCSR_CMDERR | PKHCSR_ILL | PKHCSR_PKERESV |
	    PKHCSR_PKEWDT | PKHCSR_PKENOTPRIME |
	    PKHCSR_PKE_B | PKHCSR_PKE_A | PKHCSR_PKE_M | PKHCSR_PKE_R |
	    PKHCSR_PKEOPCODE);

	/* hardware read pointer: entries below it are complete */
	rp = (NOCT_READ_4(sc, NOCT_PKH_Q_PTR) & PKHQPTR_READ_M) >>
	    PKHQPTR_READ_S;

	while (sc->sc_pkhrp != rp) {
		if (sc->sc_pkh_bnsw[sc->sc_pkhrp].bn_callback != NULL)
			(*sc->sc_pkh_bnsw[sc->sc_pkhrp].bn_callback)(sc,
			    sc->sc_pkhrp, 0);
		if (++sc->sc_pkhrp == NOCT_PKH_ENTRIES)
			sc->sc_pkhrp = 0;
	}
	sc->sc_pkhrp = rp;

	if (csr & PKHCSR_CMDSI) {
		/* command completed */
	}

	if (csr & PKHCSR_SKSWR)
		printf("%s:%x: sks write error\n", sc->sc_dv.dv_xname, rp);
	if (csr & PKHCSR_SKSOFF)
		printf("%s:%x: sks offset error\n", sc->sc_dv.dv_xname, rp);
	if (csr & PKHCSR_PKHLEN)
		printf("%s:%x: pkh invalid length\n", sc->sc_dv.dv_xname, rp);
	if (csr & PKHCSR_PKHOPCODE)
		printf("%s:%x: pkh bad opcode\n", sc->sc_dv.dv_xname, rp);
	if (csr & PKHCSR_BADQBASE)
		printf("%s:%x: pkh base qbase\n", sc->sc_dv.dv_xname, rp);
	if (csr & PKHCSR_LOADERR)
		printf("%s:%x: pkh load error\n", sc->sc_dv.dv_xname, rp);
	if (csr & PKHCSR_STOREERR)
		printf("%s:%x: pkh store error\n", sc->sc_dv.dv_xname, rp);
	if (csr & PKHCSR_CMDERR)
		printf("%s:%x: pkh command error\n", sc->sc_dv.dv_xname, rp);
	if (csr & PKHCSR_ILL)
		printf("%s:%x: pkh illegal access\n", sc->sc_dv.dv_xname, rp);
	if (csr & PKHCSR_PKERESV)
		printf("%s:%x: pke reserved error\n", sc->sc_dv.dv_xname, rp);
	if (csr & PKHCSR_PKEWDT)
		printf("%s:%x: pke watchdog\n", sc->sc_dv.dv_xname, rp);
	if (csr & PKHCSR_PKENOTPRIME)
		printf("%s:%x: pke not prime\n", sc->sc_dv.dv_xname, rp);
	if (csr & PKHCSR_PKE_B)
		printf("%s:%x: pke bad 'b'\n", sc->sc_dv.dv_xname, rp);
	if (csr & PKHCSR_PKE_A)
		printf("%s:%x: pke bad 'a'\n", sc->sc_dv.dv_xname, rp);
	if (csr & PKHCSR_PKE_M)
		printf("%s:%x: pke bad 'm'\n", sc->sc_dv.dv_xname, rp);
	if (csr & PKHCSR_PKE_R)
		printf("%s:%x: pke bad 'r'\n", sc->sc_dv.dv_xname, rp);
	if (csr & PKHCSR_PKEOPCODE)
		printf("%s:%x: pke bad opcode\n", sc->sc_dv.dv_xname, rp);
}
535:
/*
 * Quiesce the random number unit: mask its bridge irq, mask unit
 * interrupts, stop the transfer queue, wait for it to drain, then
 * turn off the generator itself.
 */
void
noct_rng_disable(sc)
	struct noct_softc *sc;
{
	u_int64_t csr;
	u_int32_t r;

	/* Turn off RN irq */
	NOCT_WRITE_4(sc, NOCT_BRDG_CTL,
	    NOCT_READ_4(sc, NOCT_BRDG_CTL) & ~(BRDGCTL_RNIRQ_ENA));

	/* Turn off RNH interrupts */
	r = NOCT_READ_4(sc, NOCT_RNG_CSR);
	r &= ~(RNGCSR_INT_KEY | RNGCSR_INT_DUP |
	    RNGCSR_INT_BUS | RNGCSR_INT_ACCESS);
	NOCT_WRITE_4(sc, NOCT_RNG_CSR, r);

	/* Turn off RN queue */
	r = NOCT_READ_4(sc, NOCT_RNG_CSR);
	r &= ~(RNGCSR_XFER_ENABLE | RNGCSR_INT_KEY | RNGCSR_INT_BUS |
	    RNGCSR_INT_DUP | RNGCSR_INT_ACCESS);
	NOCT_WRITE_4(sc, NOCT_RNG_CSR, r);

	/* wait for an in-flight transfer to finish */
	for (;;) {
		r = NOCT_READ_4(sc, NOCT_RNG_CSR);
		if ((r & RNGCSR_XFER_BUSY) == 0)
			break;
	}

	/* Turn off RN generator */
	csr = NOCT_READ_8(sc, NOCT_RNG_CTL);
	csr &= ~RNGCTL_RNG_ENA;
	NOCT_WRITE_8(sc, NOCT_RNG_CTL, csr);
}
570:
/*
 * Bring the random number unit online: program the output queue
 * base/length, configure and start the generator, enable transfers
 * and unit interrupts, then unmask the bridge irq.
 */
void
noct_rng_enable(sc)
	struct noct_softc *sc;
{
	u_int64_t adr;
	u_int32_t r;

	adr = sc->sc_rngmap->dm_segs[0].ds_addr;
	/* HI and LEN first; the LO write presumably latches the base */
	NOCT_WRITE_4(sc, NOCT_RNG_Q_BASE_HI, (adr >> 32) & 0xffffffff);
	NOCT_WRITE_4(sc, NOCT_RNG_Q_LEN, NOCT_RNG_QLEN);
	NOCT_WRITE_4(sc, NOCT_RNG_Q_BASE_LO, (adr >> 0 ) & 0xffffffff);

	/*
	 * NOTE(review): DIAG and EXTCLK are enabled together with the
	 * internal seed source; confirm against the NSP2000 datasheet
	 * that this combination yields true (non-diagnostic) output.
	 */
	NOCT_WRITE_8(sc, NOCT_RNG_CTL,
	    RNGCTL_RNG_ENA |
	    RNGCTL_TOD_ENA |
	    RNGCTL_BUFSRC_SEED |
	    RNGCTL_SEEDSRC_INT |
	    RNGCTL_EXTCLK_ENA |
	    RNGCTL_DIAG |
	    (100 & RNGCTL_ITERCNT));

	/* Turn on interrupts and enable xfer */
	r = RNGCSR_XFER_ENABLE | RNGCSR_INT_ACCESS |
	    RNGCSR_INT_KEY | RNGCSR_INT_BUS | RNGCSR_INT_DUP;
	NOCT_WRITE_4(sc, NOCT_RNG_CSR, r);

	/* Turn on bridge/rng interrupts */
	r = NOCT_READ_4(sc, NOCT_BRDG_CTL);
	r |= BRDGCTL_RNIRQ_ENA;
	NOCT_WRITE_4(sc, NOCT_BRDG_CTL, r);
}
602:
/*
 * Initialize the random number unit: allocate/map/load the DMA
 * output queue, reset and enable the unit, and arm the ~100Hz
 * polling timeout that drains random words into the entropy pool.
 */
void
noct_rng_init(sc)
	struct noct_softc *sc;
{
	bus_dma_segment_t seg;
	int rseg;

	if (bus_dmamem_alloc(sc->sc_dmat, NOCT_RNG_BUFSIZE,
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: failed rng buf alloc\n", sc->sc_dv.dv_xname);
		goto fail;
	}
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, NOCT_RNG_BUFSIZE,
	    (caddr_t *)&sc->sc_rngbuf, BUS_DMA_NOWAIT)) {
		printf("%s: failed rng buf map\n", sc->sc_dv.dv_xname);
		goto fail_1;
	}
	if (bus_dmamap_create(sc->sc_dmat, NOCT_RNG_BUFSIZE, rseg,
	    NOCT_RNG_BUFSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_rngmap)) {
		printf("%s: failed rng map create\n", sc->sc_dv.dv_xname);
		goto fail_2;
	}
	if (bus_dmamap_load_raw(sc->sc_dmat, sc->sc_rngmap,
	    &seg, rseg, NOCT_RNG_BUFSIZE, BUS_DMA_NOWAIT)) {
		printf("%s: failed rng buf load\n", sc->sc_dv.dv_xname);
		goto fail_3;
	}

	noct_rng_disable(sc);
	noct_rng_enable(sc);

	printf(" RNG");

	/* poll at ~100Hz (at least one tick on slow clocks) */
	if (hz > 100)
		sc->sc_rngtick = hz/100;
	else
		sc->sc_rngtick = 1;
	timeout_set(&sc->sc_rngto, noct_rng_tick, sc);
	timeout_add(&sc->sc_rngto, sc->sc_rngtick);

	return;

fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_rngmap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->sc_rngbuf, NOCT_RNG_BUFSIZE);
fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
fail:
	sc->sc_rngbuf = NULL;
	sc->sc_rngmap = NULL;
}
656:
/*
 * Random number unit interrupt: acknowledge the status bits and
 * report error conditions.  Any error permanently disables the RNG
 * (bad keys, bus errors, duplicate output blocks, illegal access).
 */
void
noct_rng_intr(sc)
	struct noct_softc *sc;
{
	u_int32_t csr;
	int enable = 1;

	csr = NOCT_READ_4(sc, NOCT_RNG_CSR);
	/* write back to ack (presumably write-1-to-clear) */
	NOCT_WRITE_4(sc, NOCT_RNG_CSR, csr);

	if (csr & RNGCSR_ERR_KEY) {
		u_int32_t ctl;

		enable = 0;
		/*
		 * NOTE(review): RNG_CTL is accessed with NOCT_READ_8 /
		 * NOCT_WRITE_8 elsewhere but read 4 bytes wide here --
		 * confirm the parity-error bits live in this word.
		 */
		ctl = NOCT_READ_4(sc, NOCT_RNG_CTL);
		printf("%s: rng bad key(s)", sc->sc_dv.dv_xname);
		if (ctl & RNGCTL_KEY1PAR_ERR)
			printf(", key1 parity");
		if (ctl & RNGCTL_KEY2PAR_ERR)
			printf(", key2 parity");
		printf("\n");
	}
	if (csr & RNGCSR_ERR_BUS) {
		enable = 0;
		printf("%s: rng bus error\n", sc->sc_dv.dv_xname);
	}
	if (csr & RNGCSR_ERR_DUP) {
		enable = 0;
		printf("%s: rng duplicate block\n", sc->sc_dv.dv_xname);
	}
	if (csr & RNGCSR_ERR_ACCESS) {
		enable = 0;
		printf("%s: rng invalid access\n", sc->sc_dv.dv_xname);
	}

	if (!enable)
		noct_rng_disable(sc);
}
695:
/*
 * Periodic (~100Hz) timeout: drain up to 32 freshly generated
 * 64-bit words from the RNG output ring into the kernel entropy
 * pool, advance the hardware read pointer, and re-arm the timeout.
 */
void
noct_rng_tick(vsc)
	void *vsc;
{
	struct noct_softc *sc = vsc;
	u_int64_t val;
	u_int32_t reg, rd, wr;
	int cons = 0;

	reg = NOCT_READ_4(sc, NOCT_RNG_Q_PTR);
	rd = (reg & RNGQPTR_READ_M) >> RNGQPTR_READ_S;
	wr = (reg & RNGQPTR_WRITE_M) >> RNGQPTR_WRITE_S;

	/* cap at 32 entries per tick so we don't hog the CPU */
	while (rd != wr && cons < 32) {
		val = sc->sc_rngbuf[rd];
		add_true_randomness((val >> 32) & 0xffffffff);
		add_true_randomness((val >> 0) & 0xffffffff);
		if (++rd == NOCT_RNG_ENTRIES)
			rd = 0;
		cons++;
	}

	/* tell the chip how far we got so it can refill */
	if (cons != 0)
		NOCT_WRITE_4(sc, NOCT_RNG_Q_PTR, rd);
	timeout_add(&sc->sc_rngto, sc->sc_rngtick);
}
722:
723: u_int32_t
724: noct_ea_nfree(sc)
725: struct noct_softc *sc;
726: {
727: if (sc->sc_eawp == sc->sc_earp)
728: return (NOCT_EA_ENTRIES);
729: if (sc->sc_eawp < sc->sc_earp)
730: return (sc->sc_earp - sc->sc_eawp - 1);
731: return (sc->sc_earp + NOCT_EA_ENTRIES - sc->sc_eawp - 1);
732: }
733:
/*
 * Quiesce the encryption/authentication unit: mask its bridge irq
 * and unit interrupt enables, stop the unit, wait for it to go
 * idle, then clear any latched status bits.
 */
void
noct_ea_disable(sc)
	struct noct_softc *sc;
{
	u_int32_t r;

	/* Turn off EA irq */
	NOCT_WRITE_4(sc, NOCT_BRDG_CTL,
	    NOCT_READ_4(sc, NOCT_BRDG_CTL) & ~(BRDGCTL_EAIRQ_ENA));

	/* Turn off EA interrupts */
	r = NOCT_READ_4(sc, NOCT_EA_IER);
	r &= ~(EAIER_QALIGN | EAIER_CMDCMPL | EAIER_OPERR | EAIER_CMDREAD |
	    EAIER_CMDWRITE | EAIER_DATAREAD | EAIER_DATAWRITE |
	    EAIER_INTRNLLEN | EAIER_EXTRNLLEN | EAIER_DESBLOCK |
	    EAIER_DESKEY | EAIER_ILL);
	NOCT_WRITE_4(sc, NOCT_EA_IER, r);

	/* Disable EA unit */
	r = NOCT_READ_4(sc, NOCT_EA_CSR);
	r &= ~EACSR_ENABLE;
	NOCT_WRITE_4(sc, NOCT_EA_CSR, r);
	/* spin until the unit finishes its current operation */
	for (;;) {
		r = NOCT_READ_4(sc, NOCT_EA_CSR);
		if ((r & EACSR_BUSY) == 0)
			break;
	}

	/* Clear status bits (presumably write-1-to-clear semantics) */
	r = NOCT_READ_4(sc, NOCT_EA_CSR);
	r |= EACSR_QALIGN | EACSR_CMDCMPL | EACSR_OPERR | EACSR_CMDREAD |
	    EACSR_CMDWRITE | EACSR_DATAREAD | EACSR_DATAWRITE |
	    EACSR_INTRNLLEN | EACSR_EXTRNLLEN | EACSR_DESBLOCK |
	    EACSR_DESKEY | EACSR_ILL;
	NOCT_WRITE_4(sc, NOCT_EA_CSR, r);
}
770:
/*
 * Bring the encryption/authentication unit online: reset the
 * software ring pointers, program the DMA command queue, unmask
 * all unit interrupts, enable the unit and its bridge irq.
 */
void
noct_ea_enable(sc)
	struct noct_softc *sc;
{
	u_int64_t adr;

	sc->sc_eawp = 0;
	sc->sc_earp = 0;

	adr = sc->sc_eamap->dm_segs[0].ds_addr;
	/* HI and LEN first; the LO write presumably latches the base */
	NOCT_WRITE_4(sc, NOCT_EA_Q_BASE_HI, (adr >> 32) & 0xffffffff);
	NOCT_WRITE_4(sc, NOCT_EA_Q_LEN, NOCT_EA_QLEN);
	NOCT_WRITE_4(sc, NOCT_EA_Q_BASE_LO, (adr >> 0) & 0xffffffff);

	NOCT_WRITE_4(sc, NOCT_EA_IER,
	    EAIER_QALIGN | EAIER_CMDCMPL | EAIER_OPERR | EAIER_CMDREAD |
	    EAIER_CMDWRITE | EAIER_DATAREAD | EAIER_DATAWRITE |
	    EAIER_INTRNLLEN | EAIER_EXTRNLLEN | EAIER_DESBLOCK |
	    EAIER_DESKEY | EAIER_ILL);

	NOCT_WRITE_4(sc, NOCT_EA_CSR,
	    NOCT_READ_4(sc, NOCT_EA_CSR) | EACSR_ENABLE);

	NOCT_WRITE_4(sc, NOCT_BRDG_CTL,
	    NOCT_READ_4(sc, NOCT_BRDG_CTL) | BRDGCTL_EAIRQ_ENA);
}
797:
/*
 * Initialize the encryption/authentication unit: allocate/map/load
 * the DMA command queue, reset and enable the unit, set up the
 * request queues, register the supported algorithms with the crypto
 * framework and schedule creation of the worker thread.
 */
void
noct_ea_init(sc)
	struct noct_softc *sc;
{
	bus_dma_segment_t seg;
	int rseg, algs[CRYPTO_ALGORITHM_MAX + 1];

	if (bus_dmamem_alloc(sc->sc_dmat, NOCT_EA_BUFSIZE,
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: failed ea buf alloc\n", sc->sc_dv.dv_xname);
		goto fail;
	}
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, NOCT_EA_BUFSIZE,
	    (caddr_t *)&sc->sc_eacmd, BUS_DMA_NOWAIT)) {
		printf("%s: failed ea buf map\n", sc->sc_dv.dv_xname);
		goto fail_1;
	}
	if (bus_dmamap_create(sc->sc_dmat, NOCT_EA_BUFSIZE, rseg,
	    NOCT_EA_BUFSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_eamap)) {
		printf("%s: failed ea map create\n", sc->sc_dv.dv_xname);
		goto fail_2;
	}
	if (bus_dmamap_load_raw(sc->sc_dmat, sc->sc_eamap,
	    &seg, rseg, NOCT_EA_BUFSIZE, BUS_DMA_NOWAIT)) {
		printf("%s: failed ea buf load\n", sc->sc_dv.dv_xname);
		goto fail_3;
	}

	noct_ea_disable(sc);
	noct_ea_enable(sc);

	/* pending, on-chip and completed request queues */
	SIMPLEQ_INIT(&sc->sc_inq);
	SIMPLEQ_INIT(&sc->sc_chipq);
	SIMPLEQ_INIT(&sc->sc_outq);

	bzero(algs, sizeof(algs));

	algs[CRYPTO_MD5] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA1] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_3DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;

	crypto_register(sc->sc_cid, algs,
	    noct_newsession, noct_freesession, noct_process);
	printf(" MD5 SHA1 3DES");

	/* worker thread can only be created once scheduling is up */
	kthread_create_deferred(noct_ea_create_thread, sc);

	return;

fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_eamap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat,
	    (caddr_t)sc->sc_eacmd, NOCT_EA_BUFSIZE);
fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
fail:
	sc->sc_eacmd = NULL;
	sc->sc_eamap = NULL;
}
859:
860: void
861: noct_ea_create_thread(vsc)
862: void *vsc;
863: {
864: struct noct_softc *sc = vsc;
865:
866: if (kthread_create(noct_ea_thread, sc, NULL,
867: "%s", sc->sc_dv.dv_xname))
868: panic("%s: unable to create ea thread", sc->sc_dv.dv_xname);
869: }
870:
871: void
872: noct_ea_thread(vsc)
873: void *vsc;
874: {
875: struct noct_softc *sc = vsc;
876: struct noct_workq *q;
877: struct cryptop *crp;
878: struct cryptodesc *crd;
879: int s, rseg;
880: u_int32_t len;
881:
882: for (;;) {
883: tsleep(&sc->sc_eawp, PWAIT, "noctea", 0);
884:
885: /* Handle output queue */
886: s = splnet();
887: while (!SIMPLEQ_EMPTY(&sc->sc_outq)) {
888: q = SIMPLEQ_FIRST(&sc->sc_outq);
889: SIMPLEQ_REMOVE_HEAD(&sc->sc_outq, q_next);
890: splx(s);
891:
892: crp = q->q_crp;
893: crd = crp->crp_desc;
894: switch (crd->crd_alg) {
895: case CRYPTO_MD5:
896: len = 16;
897: break;
898: case CRYPTO_SHA1:
899: len = 20;
900: break;
901: default:
902: len = 0;
903: break;
904: }
905:
906: bus_dmamap_sync(sc->sc_dmat, q->q_dmamap,
907: 0, q->q_dmamap->dm_mapsize,
908: BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
909:
910: if (len != 0) {
911: if (crp->crp_flags & CRYPTO_F_IMBUF)
912: m_copyback((struct mbuf *)crp->crp_buf,
913: crd->crd_inject, len,
914: q->q_macbuf);
915: else if (crp->crp_flags & CRYPTO_F_IOV)
916: bcopy(q->q_macbuf, crp->crp_mac, len);
917: }
918:
919: if (crd->crd_alg == CRYPTO_DES_CBC ||
920: crd->crd_alg == CRYPTO_3DES_CBC) {
921: if (crp->crp_flags & CRYPTO_F_IMBUF)
922: m_copyback((struct mbuf *)crp->crp_buf,
923: crd->crd_skip, crd->crd_len,
924: q->q_buf);
925: else if (crp->crp_flags & CRYPTO_F_IOV)
926: cuio_copyback((struct uio *)crp->crp_buf,
927: crd->crd_skip, crd->crd_len,
928: q->q_buf);
929: }
930:
931: bus_dmamap_unload(sc->sc_dmat, q->q_dmamap);
932: bus_dmamap_destroy(sc->sc_dmat, q->q_dmamap);
933: bus_dmamem_unmap(sc->sc_dmat, q->q_buf, crd->crd_len);
934: bus_dmamem_free(sc->sc_dmat, &q->q_dmaseg, rseg);
935: crp->crp_etype = 0;
936: free(q, M_DEVBUF);
937: s = splnet();
938: crypto_done(crp);
939: }
940: splx(s);
941:
942: /* Handle input queue */
943: s = splnet();
944: while (!SIMPLEQ_EMPTY(&sc->sc_inq)) {
945: q = SIMPLEQ_FIRST(&sc->sc_inq);
946: SIMPLEQ_REMOVE_HEAD(&sc->sc_inq, q_next);
947: splx(s);
948:
949: noct_ea_start(sc, q);
950: s = splnet();
951: }
952: splx(s);
953: }
954: }
955:
956: void
957: noct_ea_start(sc, q)
958: struct noct_softc *sc;
959: struct noct_workq *q;
960: {
961: struct cryptop *crp;
962: struct cryptodesc *crd;
963: int s, err;
964:
965: crp = q->q_crp;
966: crd = crp->crp_desc;
967:
968: /* XXX Can't handle multiple ops yet */
969: if (crd->crd_next != NULL) {
970: err = EOPNOTSUPP;
971: goto errout;
972: }
973:
974: switch (crd->crd_alg) {
975: case CRYPTO_MD5:
976: case CRYPTO_SHA1:
977: noct_ea_start_hash(sc, q, crp, crd);
978: break;
979: case CRYPTO_DES_CBC:
980: case CRYPTO_3DES_CBC:
981: noct_ea_start_des(sc, q, crp, crd);
982: break;
983: default:
984: err = EOPNOTSUPP;
985: goto errout;
986: }
987:
988: return;
989:
990: errout:
991: crp->crp_etype = err;
992: free(q, M_DEVBUF);
993: s = splnet();
994: crypto_done(crp);
995: splx(s);
996: }
997:
998: void
999: noct_ea_start_hash(sc, q, crp, crd)
1000: struct noct_softc *sc;
1001: struct noct_workq *q;
1002: struct cryptop *crp;
1003: struct cryptodesc *crd;
1004: {
1005: u_int64_t adr;
1006: int s, err, i, rseg;
1007: u_int32_t wp;
1008:
1009: if (crd->crd_len > 0x4800) {
1010: err = ERANGE;
1011: goto errout;
1012: }
1013:
1014: if ((err = bus_dmamem_alloc(sc->sc_dmat, crd->crd_len, PAGE_SIZE, 0,
1015: &q->q_dmaseg, 1, &rseg, BUS_DMA_WAITOK | BUS_DMA_STREAMING)) != 0)
1016: goto errout;
1017:
1018: if ((err = bus_dmamem_map(sc->sc_dmat, &q->q_dmaseg, rseg,
1019: crd->crd_len, (caddr_t *)&q->q_buf, BUS_DMA_WAITOK)) != 0)
1020: goto errout_dmafree;
1021:
1022: if ((err = bus_dmamap_create(sc->sc_dmat, crd->crd_len, 1,
1023: crd->crd_len, 0, BUS_DMA_WAITOK, &q->q_dmamap)) != 0)
1024: goto errout_dmaunmap;
1025:
1026: if ((err = bus_dmamap_load_raw(sc->sc_dmat, q->q_dmamap, &q->q_dmaseg,
1027: rseg, crd->crd_len, BUS_DMA_WAITOK)) != 0)
1028: goto errout_dmadestroy;
1029:
1030: if (crp->crp_flags & CRYPTO_F_IMBUF)
1031: m_copydata((struct mbuf *)crp->crp_buf,
1032: crd->crd_skip, crd->crd_len, q->q_buf);
1033: else if (crp->crp_flags & CRYPTO_F_IOV)
1034: cuio_copydata((struct uio *)crp->crp_buf,
1035: crd->crd_skip, crd->crd_len, q->q_buf);
1036: else {
1037: err = EINVAL;
1038: goto errout_dmaunload;
1039: }
1040:
1041: bus_dmamap_sync(sc->sc_dmat, q->q_dmamap, 0, q->q_dmamap->dm_mapsize,
1042: BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1043:
1044: s = splnet();
1045: if (noct_ea_nfree(sc) < 1) {
1046: err = ENOMEM;
1047: goto errout_dmaunload;
1048: }
1049: wp = sc->sc_eawp;
1050: if (++sc->sc_eawp == NOCT_EA_ENTRIES)
1051: sc->sc_eawp = 0;
1052: for (i = 0; i < EA_CMD_WORDS; i++)
1053: sc->sc_eacmd[wp].buf[i] = 0;
1054: sc->sc_eacmd[wp].buf[0] = EA_0_SI;
1055: switch (crd->crd_alg) {
1056: case CRYPTO_MD5:
1057: sc->sc_eacmd[wp].buf[1] = htole32(EA_OP_MD5);
1058: break;
1059: case CRYPTO_SHA1:
1060: sc->sc_eacmd[wp].buf[1] = htole32(EA_OP_SHA1);
1061: break;
1062: }
1063:
1064: /* Source, new buffer just allocated */
1065: sc->sc_eacmd[wp].buf[1] |= htole32(crd->crd_len);
1066: adr = q->q_dmamap->dm_segs[0].ds_addr;
1067: sc->sc_eacmd[wp].buf[2] = htole32(adr >> 32);
1068: sc->sc_eacmd[wp].buf[3] = htole32(adr & 0xffffffff);
1069:
1070: /* Dest, hide it in the descriptor */
1071: adr = sc->sc_eamap->dm_segs[0].ds_addr +
1072: (wp * sizeof(struct noct_ea_cmd)) +
1073: offsetof(struct noct_ea_cmd, buf[6]);
1074: sc->sc_eacmd[wp].buf[4] = htole32(adr >> 32);
1075: sc->sc_eacmd[wp].buf[5] = htole32(adr & 0xffffffff);
1076:
1077: bus_dmamap_sync(sc->sc_dmat, sc->sc_eamap,
1078: (wp * sizeof(struct noct_ea_cmd)), sizeof(struct noct_ea_cmd),
1079: BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1080:
1081: if (++wp == NOCT_EA_ENTRIES)
1082: wp = 0;
1083: NOCT_WRITE_4(sc, NOCT_EA_Q_PTR, wp);
1084: sc->sc_eawp = wp;
1085:
1086: SIMPLEQ_INSERT_TAIL(&sc->sc_chipq, q, q_next);
1087: splx(s);
1088:
1089: return;
1090:
1091: errout_dmaunload:
1092: bus_dmamap_unload(sc->sc_dmat, q->q_dmamap);
1093: errout_dmadestroy:
1094: bus_dmamap_destroy(sc->sc_dmat, q->q_dmamap);
1095: errout_dmaunmap:
1096: bus_dmamem_unmap(sc->sc_dmat, q->q_buf, crd->crd_len);
1097: errout_dmafree:
1098: bus_dmamem_free(sc->sc_dmat, &q->q_dmaseg, rseg);
1099: errout:
1100: crp->crp_etype = err;
1101: free(q, M_DEVBUF);
1102: s = splnet();
1103: crypto_done(crp);
1104: splx(s);
1105: }
1106:
1107: void
1108: noct_ea_start_des(sc, q, crp, crd)
1109: struct noct_softc *sc;
1110: struct noct_workq *q;
1111: struct cryptop *crp;
1112: struct cryptodesc *crd;
1113: {
1114: u_int64_t adr;
1115: volatile u_int8_t *pb;
1116: int s, err, i, rseg;
1117: u_int32_t wp;
1118: u_int8_t iv[8], key[24];
1119:
1120: if (crd->crd_len > 0x4800) {
1121: err = ERANGE;
1122: goto errout;
1123: }
1124:
1125: if ((crd->crd_len & 3) != 0) {
1126: err = ERANGE;
1127: goto errout;
1128: }
1129:
1130: if (crd->crd_alg == CRYPTO_DES_CBC) {
1131: for (i = 0; i < 8; i++)
1132: key[i] = key[i + 8] = key[i + 16] = crd->crd_key[i];
1133: } else {
1134: for (i = 0; i < 24; i++)
1135: key[i] = crd->crd_key[i];
1136: }
1137:
1138: if (crd->crd_flags & CRD_F_ENCRYPT) {
1139: if (crd->crd_flags & CRD_F_IV_EXPLICIT)
1140: bcopy(crd->crd_iv, iv, 8);
1141: else
1142: get_random_bytes(iv, sizeof(iv));
1143:
1144: if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
1145: if (crp->crp_flags & CRYPTO_F_IMBUF)
1146: m_copyback((struct mbuf *)crp->crp_buf,
1147: crd->crd_inject, 8, iv);
1148: else if (crp->crp_flags & CRYPTO_F_IOV)
1149: cuio_copyback((struct uio *)crp->crp_buf,
1150: crd->crd_inject, 8, iv);
1151: }
1152: } else {
1153: if (crd->crd_flags & CRD_F_IV_EXPLICIT)
1154: bcopy(crd->crd_iv, iv, 8);
1155: else if (crp->crp_flags & CRYPTO_F_IMBUF)
1156: m_copydata((struct mbuf *)crp->crp_buf,
1157: crd->crd_inject, 8, iv);
1158: else if (crp->crp_flags & CRYPTO_F_IOV)
1159: cuio_copydata((struct uio *)crp->crp_buf,
1160: crd->crd_inject, 8, iv);
1161: }
1162:
1163: if ((err = bus_dmamem_alloc(sc->sc_dmat, crd->crd_len, PAGE_SIZE, 0,
1164: &q->q_dmaseg, 1, &rseg, BUS_DMA_WAITOK | BUS_DMA_STREAMING)) != 0)
1165: goto errout;
1166:
1167: if ((err = bus_dmamem_map(sc->sc_dmat, &q->q_dmaseg, rseg,
1168: crd->crd_len, (caddr_t *)&q->q_buf, BUS_DMA_WAITOK)) != 0)
1169: goto errout_dmafree;
1170:
1171: if ((err = bus_dmamap_create(sc->sc_dmat, crd->crd_len, 1,
1172: crd->crd_len, 0, BUS_DMA_WAITOK, &q->q_dmamap)) != 0)
1173: goto errout_dmaunmap;
1174:
1175: if ((err = bus_dmamap_load_raw(sc->sc_dmat, q->q_dmamap, &q->q_dmaseg,
1176: rseg, crd->crd_len, BUS_DMA_WAITOK)) != 0)
1177: goto errout_dmadestroy;
1178:
1179: if (crp->crp_flags & CRYPTO_F_IMBUF)
1180: m_copydata((struct mbuf *)crp->crp_buf,
1181: crd->crd_skip, crd->crd_len, q->q_buf);
1182: else if (crp->crp_flags & CRYPTO_F_IOV)
1183: cuio_copydata((struct uio *)crp->crp_buf,
1184: crd->crd_skip, crd->crd_len, q->q_buf);
1185: else {
1186: err = EINVAL;
1187: goto errout_dmaunload;
1188: }
1189:
1190: bus_dmamap_sync(sc->sc_dmat, q->q_dmamap, 0, q->q_dmamap->dm_mapsize,
1191: BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1192:
1193: s = splnet();
1194: if (noct_ea_nfree(sc) < 1) {
1195: err = ENOMEM;
1196: goto errout_dmaunload;
1197: }
1198: wp = sc->sc_eawp;
1199: if (++sc->sc_eawp == NOCT_EA_ENTRIES)
1200: sc->sc_eawp = 0;
1201:
1202: for (i = 0; i < EA_CMD_WORDS; i++)
1203: sc->sc_eacmd[wp].buf[i] = 0;
1204:
1205: sc->sc_eacmd[wp].buf[0] = EA_0_SI;
1206:
1207: if (crd->crd_flags & CRD_F_ENCRYPT)
1208: sc->sc_eacmd[wp].buf[1] = htole32(EA_OP_3DESCBCE);
1209: else
1210: sc->sc_eacmd[wp].buf[1] = htole32(EA_OP_3DESCBCD);
1211:
1212: /* Source, new buffer just allocated */
1213: sc->sc_eacmd[wp].buf[1] |= htole32(crd->crd_len);
1214: adr = q->q_dmamap->dm_segs[0].ds_addr;
1215: sc->sc_eacmd[wp].buf[2] = htole32(adr >> 32);
1216: sc->sc_eacmd[wp].buf[3] = htole32(adr & 0xffffffff);
1217:
1218: /* Dest, same as source. */
1219: sc->sc_eacmd[wp].buf[4] = htole32(adr >> 32);
1220: sc->sc_eacmd[wp].buf[5] = htole32(adr & 0xffffffff);
1221:
1222: /* IV and key */
1223: pb = (volatile u_int8_t *)&sc->sc_eacmd[wp].buf[20];
1224: for (i = 0; i < 8; i++)
1225: pb[i] = iv[i];
1226: SWAP32(sc->sc_eacmd[wp].buf[20]);
1227: SWAP32(sc->sc_eacmd[wp].buf[21]);
1228: pb = (volatile u_int8_t *)&sc->sc_eacmd[wp].buf[24];
1229: for (i = 0; i < 24; i++)
1230: pb[i] = key[i];
1231: SWAP32(sc->sc_eacmd[wp].buf[24]);
1232: SWAP32(sc->sc_eacmd[wp].buf[25]);
1233: SWAP32(sc->sc_eacmd[wp].buf[26]);
1234: SWAP32(sc->sc_eacmd[wp].buf[27]);
1235: SWAP32(sc->sc_eacmd[wp].buf[28]);
1236: SWAP32(sc->sc_eacmd[wp].buf[29]);
1237:
1238: bus_dmamap_sync(sc->sc_dmat, sc->sc_eamap,
1239: (wp * sizeof(struct noct_ea_cmd)), sizeof(struct noct_ea_cmd),
1240: BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1241:
1242: if (++wp == NOCT_EA_ENTRIES)
1243: wp = 0;
1244: NOCT_WRITE_4(sc, NOCT_EA_Q_PTR, wp);
1245: sc->sc_eawp = wp;
1246:
1247: SIMPLEQ_INSERT_TAIL(&sc->sc_chipq, q, q_next);
1248: splx(s);
1249:
1250: return;
1251:
1252: errout_dmaunload:
1253: bus_dmamap_unload(sc->sc_dmat, q->q_dmamap);
1254: errout_dmadestroy:
1255: bus_dmamap_destroy(sc->sc_dmat, q->q_dmamap);
1256: errout_dmaunmap:
1257: bus_dmamem_unmap(sc->sc_dmat, q->q_buf, crd->crd_len);
1258: errout_dmafree:
1259: bus_dmamem_free(sc->sc_dmat, &q->q_dmaseg, rseg);
1260: errout:
1261: crp->crp_etype = err;
1262: free(q, M_DEVBUF);
1263: s = splnet();
1264: crypto_done(crp);
1265: splx(s);
1266: }
1267:
void
noct_ea_intr(sc)
	struct noct_softc *sc;
{
	struct noct_workq *q;
	u_int32_t csr, rp;

	/*
	 * EA (encrypt/authenticate) unit interrupt: acknowledge all
	 * asserted status bits, reap every command the chip has
	 * consumed, then report any error conditions it flagged.
	 */
	csr = NOCT_READ_4(sc, NOCT_EA_CSR);
	/* Write the asserted bits back to clear them. */
	NOCT_WRITE_4(sc, NOCT_EA_CSR, csr |
	    EACSR_QALIGN | EACSR_CMDCMPL | EACSR_OPERR | EACSR_CMDREAD |
	    EACSR_CMDWRITE | EACSR_DATAREAD | EACSR_DATAWRITE |
	    EACSR_INTRNLLEN | EACSR_EXTRNLLEN | EACSR_DESBLOCK |
	    EACSR_DESKEY | EACSR_ILL);

	/* Chip's read pointer: everything before it has completed. */
	rp = (NOCT_READ_4(sc, NOCT_EA_Q_PTR) & EAQPTR_READ_M) >>
	    EAQPTR_READ_S;
	while (sc->sc_earp != rp) {
		if (SIMPLEQ_EMPTY(&sc->sc_chipq))
			panic("%s: empty chipq", sc->sc_dv.dv_xname);
		/* Move the oldest in-flight request to the output queue. */
		q = SIMPLEQ_FIRST(&sc->sc_chipq);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_chipq, q_next);
		SIMPLEQ_INSERT_TAIL(&sc->sc_outq, q, q_next);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_eamap,
		    (sc->sc_earp * sizeof(struct noct_ea_cmd)),
		    sizeof(struct noct_ea_cmd),
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
		/*
		 * Save the 20 byte result that hash commands write back
		 * into words 6..10 of their own descriptor (see the hash
		 * start routine).  This is copied unconditionally, so
		 * for cipher commands it is just descriptor scratch.
		 */
		bcopy((u_int8_t *)&sc->sc_eacmd[sc->sc_earp].buf[6],
		    q->q_macbuf, 20);

		NOCT_WAKEUP(sc);
		if (++sc->sc_earp == NOCT_EA_ENTRIES)
			sc->sc_earp = 0;
	}
	/* The loop exits with sc_earp == rp; this store is redundant. */
	sc->sc_earp = rp;

	if (csr & EACSR_QALIGN)
		printf("%s: ea bad queue alignment\n", sc->sc_dv.dv_xname);
	if (csr & EACSR_OPERR)
		printf("%s: ea bad opcode\n", sc->sc_dv.dv_xname);
	if (csr & EACSR_CMDREAD)
		printf("%s: ea command read error\n", sc->sc_dv.dv_xname);
	if (csr & EACSR_CMDWRITE)
		printf("%s: ea command write error\n", sc->sc_dv.dv_xname);
	if (csr & EACSR_DATAREAD)
		printf("%s: ea data read error\n", sc->sc_dv.dv_xname);
	if (csr & EACSR_DATAWRITE)
		printf("%s: ea data write error\n", sc->sc_dv.dv_xname);
	if (csr & EACSR_INTRNLLEN)
		printf("%s: ea bad internal len\n", sc->sc_dv.dv_xname);
	if (csr & EACSR_EXTRNLLEN)
		printf("%s: ea bad external len\n", sc->sc_dv.dv_xname);
	if (csr & EACSR_DESBLOCK)
		printf("%s: ea bad des block\n", sc->sc_dv.dv_xname);
	if (csr & EACSR_DESKEY)
		printf("%s: ea bad des key\n", sc->sc_dv.dv_xname);
	if (csr & EACSR_ILL)
		printf("%s: ea illegal access\n", sc->sc_dv.dv_xname);
}
1327:
1328: void
1329: noct_write_8(sc, reg, val)
1330: struct noct_softc *sc;
1331: u_int32_t reg;
1332: u_int64_t val;
1333: {
1334: NOCT_WRITE_4(sc, reg, (val >> 32) & 0xffffffff);
1335: NOCT_WRITE_4(sc, reg + 4, (val >> 0) & 0xffffffff);
1336: }
1337:
1338: u_int64_t
1339: noct_read_8(sc, reg)
1340: struct noct_softc *sc;
1341: u_int32_t reg;
1342: {
1343: u_int64_t ret;
1344:
1345: ret = NOCT_READ_4(sc, reg);
1346: ret <<= 32;
1347: ret |= NOCT_READ_4(sc, reg + 4);
1348: return (ret);
1349: }
1350:
1351: /*
1352: * NSP2000 is has a nifty bug, writes or reads to consecutive addresses
1353: * can be coalesced by a PCI bridge and executed as a burst read or write
1354: * which NSP2000's AMBA bridge doesn't grok. Avoid the hazard.
1355: */
1356: u_int32_t
1357: noct_read_4(sc, off)
1358: struct noct_softc *sc;
1359: bus_size_t off;
1360: {
1361: if (sc->sc_rar_last == off - 4 ||
1362: sc->sc_rar_last == off + 4) {
1363: bus_space_write_4(sc->sc_st, sc->sc_sh, NOCT_BRDG_TEST, 0);
1364: sc->sc_rar_last = off;
1365: sc->sc_waw_last = 0xffffffff;
1366: }
1367: return (bus_space_read_4(sc->sc_st, sc->sc_sh, off));
1368: }
1369:
1370: void
1371: noct_write_4(sc, off, val)
1372: struct noct_softc *sc;
1373: bus_size_t off;
1374: u_int32_t val;
1375: {
1376: if (sc->sc_waw_last == off - 4 ||
1377: sc->sc_waw_last == off + 4) {
1378: bus_space_read_4(sc->sc_st, sc->sc_sh, NOCT_BRDG_TEST);
1379: sc->sc_waw_last = off;
1380: sc->sc_rar_last = 0xffffffff;
1381: }
1382: bus_space_write_4(sc->sc_st, sc->sc_sh, off, val);
1383: }
1384:
1385: struct noct_softc *
1386: noct_kfind(krp)
1387: struct cryptkop *krp;
1388: {
1389: struct noct_softc *sc;
1390: int i;
1391:
1392: for (i = 0; i < noct_cd.cd_ndevs; i++) {
1393: sc = noct_cd.cd_devs[i];
1394: if (sc == NULL)
1395: continue;
1396: if (sc->sc_cid == krp->krp_hid)
1397: return (sc);
1398: }
1399: return (NULL);
1400: }
1401:
1402: int
1403: noct_kprocess(krp)
1404: struct cryptkop *krp;
1405: {
1406: struct noct_softc *sc;
1407:
1408: if (krp == NULL || krp->krp_callback == NULL)
1409: return (EINVAL);
1410: if ((sc = noct_kfind(krp)) == NULL) {
1411: krp->krp_status = EINVAL;
1412: crypto_kdone(krp);
1413: return (0);
1414: }
1415:
1416: switch (krp->krp_op) {
1417: case CRK_MOD_EXP:
1418: noct_kprocess_modexp(sc, krp);
1419: break;
1420: default:
1421: printf("%s: kprocess: invalid op 0x%x\n",
1422: sc->sc_dv.dv_xname, krp->krp_op);
1423: krp->krp_status = EOPNOTSUPP;
1424: crypto_kdone(krp);
1425: break;
1426: }
1427: return (0);
1428: }
1429:
1430: u_int32_t
1431: noct_pkh_nfree(sc)
1432: struct noct_softc *sc;
1433: {
1434: if (sc->sc_pkhwp == sc->sc_pkhrp)
1435: return (NOCT_PKH_ENTRIES);
1436: if (sc->sc_pkhwp < sc->sc_pkhrp)
1437: return (sc->sc_pkhrp - sc->sc_pkhwp - 1);
1438: return (sc->sc_pkhrp + NOCT_PKH_ENTRIES - sc->sc_pkhwp - 1);
1439: }
1440:
1441: int
1442: noct_kprocess_modexp(sc, krp)
1443: struct noct_softc *sc;
1444: struct cryptkop *krp;
1445: {
1446: int s, err;
1447: u_int32_t wp, aidx, bidx, midx;
1448: u_int64_t adr;
1449: union noct_pkh_cmd *cmd;
1450: int i, bits, mbits, digits, rmodidx, mmulidx;
1451:
1452: s = splnet();
1453: if (noct_pkh_nfree(sc) < 7) {
1454: /* Need 7 entries: 3 loads, 1 store, 3 ops */
1455: splx(s);
1456: return (ENOMEM);
1457: }
1458:
1459: /* Load M */
1460: midx = wp = sc->sc_pkhwp;
1461: mbits = bits = noct_ksigbits(&krp->krp_param[2]);
1462: if (bits > 4096) {
1463: err = ERANGE;
1464: goto errout;
1465: }
1466: sc->sc_pkh_bnsw[midx].bn_siz = (bits + 127) / 128;
1467: if (extent_alloc(sc->sc_pkh_bn, sc->sc_pkh_bnsw[midx].bn_siz,
1468: EX_NOALIGN, 0, EX_NOBOUNDARY, EX_NOWAIT,
1469: &sc->sc_pkh_bnsw[midx].bn_off)) {
1470: err = ENOMEM;
1471: goto errout;
1472: }
1473: cmd = &sc->sc_pkhcmd[midx];
1474: cmd->cache.op = htole32(PKH_OP_CODE_LOAD);
1475: cmd->cache.r = htole32(sc->sc_pkh_bnsw[midx].bn_off);
1476: adr = sc->sc_bnmap->dm_segs[0].ds_addr +
1477: (sc->sc_pkh_bnsw[midx].bn_off * 16);
1478: cmd->cache.addrhi = htole32((adr >> 32) & 0xffffffff);
1479: cmd->cache.addrlo = htole32((adr >> 0 ) & 0xffffffff);
1480: cmd->cache.len = htole32(sc->sc_pkh_bnsw[midx].bn_siz);
1481: cmd->cache.unused[0] = cmd->cache.unused[1] = cmd->cache.unused[2] = 0;
1482: bus_dmamap_sync(sc->sc_dmat, sc->sc_pkhmap,
1483: midx * sizeof(union noct_pkh_cmd), sizeof(union noct_pkh_cmd),
1484: BUS_DMASYNC_PREWRITE);
1485: for (i = 0; i < (digits * 16); i++)
1486: sc->sc_bncache[(sc->sc_pkh_bnsw[midx].bn_off * 16) + i] = 0;
1487: for (i = 0; i < ((bits + 7) / 8); i++)
1488: sc->sc_bncache[(sc->sc_pkh_bnsw[midx].bn_off * 16) +
1489: (digits * 16) - 1 - i] = krp->krp_param[2].crp_p[i];
1490: bus_dmamap_sync(sc->sc_dmat, sc->sc_bnmap,
1491: sc->sc_pkh_bnsw[midx].bn_off * 16, digits * 16,
1492: BUS_DMASYNC_PREWRITE);
1493: if (++wp == NOCT_PKH_ENTRIES)
1494: wp = 0;
1495:
1496: /* Store RMOD(m) -> location tmp1 */
1497: rmodidx = wp;
1498: sc->sc_pkh_bnsw[rmodidx].bn_siz = sc->sc_pkh_bnsw[midx].bn_siz;
1499: if (extent_alloc(sc->sc_pkh_bn, sc->sc_pkh_bnsw[rmodidx].bn_siz,
1500: EX_NOALIGN, 0, EX_NOBOUNDARY, EX_NOWAIT,
1501: &sc->sc_pkh_bnsw[rmodidx].bn_off)) {
1502: err = ENOMEM;
1503: goto errout_m;
1504: }
1505: cmd = &sc->sc_pkhcmd[rmodidx];
1506: cmd->arith.op = htole32(PKH_OP_CODE_RMOD);
1507: cmd->arith.r = htole32(sc->sc_pkh_bnsw[rmodidx].bn_off);
1508: cmd->arith.m = htole32(sc->sc_pkh_bnsw[midx].bn_off |
1509: (sc->sc_pkh_bnsw[midx].bn_siz << 16));
1510: cmd->arith.a = cmd->arith.b = cmd->arith.c = cmd->arith.unused[0] =
1511: cmd->arith.unused[1] = 0;
1512: bus_dmamap_sync(sc->sc_dmat, sc->sc_pkhmap,
1513: rmodidx * sizeof(union noct_pkh_cmd), sizeof(union noct_pkh_cmd),
1514: BUS_DMASYNC_PREWRITE);
1515: if (++wp == NOCT_PKH_ENTRIES)
1516: wp = 0;
1517:
1518: /* Load A XXX deal with A < M padding ... */
1519: aidx = wp = sc->sc_pkhwp;
1520: bits = noct_ksigbits(&krp->krp_param[0]);
1521: if (bits > 4096 || bits > mbits) {
1522: err = ERANGE;
1523: goto errout_rmod;
1524: }
1525: sc->sc_pkh_bnsw[aidx].bn_siz = (bits + 127) / 128;
1526: if (extent_alloc(sc->sc_pkh_bn, sc->sc_pkh_bnsw[aidx].bn_siz,
1527: EX_NOALIGN, 0, EX_NOBOUNDARY, EX_NOWAIT,
1528: &sc->sc_pkh_bnsw[aidx].bn_off)) {
1529: err = ENOMEM;
1530: goto errout_rmod;
1531: }
1532: cmd = &sc->sc_pkhcmd[aidx];
1533: cmd->cache.op = htole32(PKH_OP_CODE_LOAD);
1534: cmd->cache.r = htole32(sc->sc_pkh_bnsw[aidx].bn_off);
1535: adr = sc->sc_bnmap->dm_segs[0].ds_addr +
1536: (sc->sc_pkh_bnsw[aidx].bn_off * 16);
1537: cmd->cache.addrhi = htole32((adr >> 32) & 0xffffffff);
1538: cmd->cache.addrlo = htole32((adr >> 0 ) & 0xffffffff);
1539: cmd->cache.len = htole32(sc->sc_pkh_bnsw[aidx].bn_siz);
1540: cmd->cache.unused[0] = cmd->cache.unused[1] = cmd->cache.unused[2] = 0;
1541: bus_dmamap_sync(sc->sc_dmat, sc->sc_pkhmap,
1542: aidx * sizeof(union noct_pkh_cmd), sizeof(union noct_pkh_cmd),
1543: BUS_DMASYNC_PREWRITE);
1544: for (i = 0; i < (digits * 16); i++)
1545: sc->sc_bncache[(sc->sc_pkh_bnsw[aidx].bn_off * 16) + i] = 0;
1546: for (i = 0; i < ((bits + 7) / 8); i++)
1547: sc->sc_bncache[(sc->sc_pkh_bnsw[aidx].bn_off * 16) +
1548: (digits * 16) - 1 - i] = krp->krp_param[2].crp_p[i];
1549: bus_dmamap_sync(sc->sc_dmat, sc->sc_bnmap,
1550: sc->sc_pkh_bnsw[aidx].bn_off * 16, digits * 16,
1551: BUS_DMASYNC_PREWRITE);
1552: if (++wp == NOCT_PKH_ENTRIES)
1553: wp = 0;
1554:
1555: /* Compute (A * tmp1) mod m -> A */
1556: mmulidx = wp;
1557: sc->sc_pkh_bnsw[mmulidx].bn_siz = 0;
1558: sc->sc_pkh_bnsw[mmulidx].bn_off = 0;
1559: cmd = &sc->sc_pkhcmd[mmulidx];
1560: cmd->arith.op = htole32(PKH_OP_CODE_MUL);
1561: cmd->arith.r = htole32(sc->sc_pkh_bnsw[aidx].bn_off);
1562: cmd->arith.m = htole32(sc->sc_pkh_bnsw[midx].bn_off |
1563: (sc->sc_pkh_bnsw[midx].bn_siz << 16));
1564: cmd->arith.a = htole32(sc->sc_pkh_bnsw[aidx].bn_off |
1565: (sc->sc_pkh_bnsw[aidx].bn_siz << 16));
1566: cmd->arith.b = htole32(sc->sc_pkh_bnsw[rmodidx].bn_off |
1567: (sc->sc_pkh_bnsw[rmodidx].bn_siz << 16));
1568: cmd->arith.c = cmd->arith.unused[0] = cmd->arith.unused[1] = 0;
1569: bus_dmamap_sync(sc->sc_dmat, sc->sc_pkhmap,
1570: rmodidx * sizeof(union noct_pkh_cmd), sizeof(union noct_pkh_cmd),
1571: BUS_DMASYNC_PREWRITE);
1572: if (++wp == NOCT_PKH_ENTRIES)
1573: wp = 0;
1574:
1575: /* Load B */
1576: bidx = wp = sc->sc_pkhwp;
1577: bits = noct_ksigbits(&krp->krp_param[1]);
1578: if (bits > 4096) {
1579: err = ERANGE;
1580: goto errout_a;
1581: }
1582: sc->sc_pkh_bnsw[bidx].bn_siz = (bits + 127) / 128;
1583: if (extent_alloc(sc->sc_pkh_bn, sc->sc_pkh_bnsw[bidx].bn_siz,
1584: EX_NOALIGN, 0, EX_NOBOUNDARY, EX_NOWAIT,
1585: &sc->sc_pkh_bnsw[bidx].bn_off)) {
1586: err = ENOMEM;
1587: goto errout_a;
1588: }
1589: cmd = &sc->sc_pkhcmd[bidx];
1590: cmd->cache.op = htole32(PKH_OP_CODE_LOAD);
1591: cmd->cache.r = htole32(sc->sc_pkh_bnsw[bidx].bn_off);
1592: adr = sc->sc_bnmap->dm_segs[0].ds_addr +
1593: (sc->sc_pkh_bnsw[bidx].bn_off * 16);
1594: cmd->cache.addrhi = htole32((adr >> 32) & 0xffffffff);
1595: cmd->cache.addrlo = htole32((adr >> 0 ) & 0xffffffff);
1596: cmd->cache.len = htole32(sc->sc_pkh_bnsw[bidx].bn_siz);
1597: cmd->cache.unused[0] = cmd->cache.unused[1] = cmd->cache.unused[2] = 0;
1598: bus_dmamap_sync(sc->sc_dmat, sc->sc_pkhmap,
1599: bidx * sizeof(union noct_pkh_cmd), sizeof(union noct_pkh_cmd),
1600: BUS_DMASYNC_PREWRITE);
1601: for (i = 0; i < (digits * 16); i++)
1602: sc->sc_bncache[(sc->sc_pkh_bnsw[bidx].bn_off * 16) + i] = 0;
1603: for (i = 0; i < ((bits + 7) / 8); i++)
1604: sc->sc_bncache[(sc->sc_pkh_bnsw[bidx].bn_off * 16) +
1605: (digits * 16) - 1 - i] = krp->krp_param[2].crp_p[i];
1606: bus_dmamap_sync(sc->sc_dmat, sc->sc_bnmap,
1607: sc->sc_pkh_bnsw[bidx].bn_off * 16, digits * 16,
1608: BUS_DMASYNC_PREWRITE);
1609: if (++wp == NOCT_PKH_ENTRIES)
1610: wp = 0;
1611:
1612: NOCT_WRITE_4(sc, NOCT_PKH_Q_PTR, wp);
1613: sc->sc_pkhwp = wp;
1614:
1615: splx(s);
1616:
1617: return (0);
1618:
1619: errout_a:
1620: extent_free(sc->sc_pkh_bn, sc->sc_pkh_bnsw[aidx].bn_off,
1621: sc->sc_pkh_bnsw[aidx].bn_siz, EX_NOWAIT);
1622: errout_rmod:
1623: extent_free(sc->sc_pkh_bn, sc->sc_pkh_bnsw[rmodidx].bn_off,
1624: sc->sc_pkh_bnsw[rmodidx].bn_siz, EX_NOWAIT);
1625: errout_m:
1626: extent_free(sc->sc_pkh_bn, sc->sc_pkh_bnsw[midx].bn_off,
1627: sc->sc_pkh_bnsw[midx].bn_siz, EX_NOWAIT);
1628: errout:
1629: splx(s);
1630: krp->krp_status = err;
1631: crypto_kdone(krp);
1632: return (1);
1633: }
1634:
1635: void
1636: noct_pkh_freedesc(sc, idx)
1637: struct noct_softc *sc;
1638: int idx;
1639: {
1640: if (sc->sc_pkh_bnsw[idx].bn_callback != NULL)
1641: (*sc->sc_pkh_bnsw[idx].bn_callback)(sc, idx, 0);
1642: }
1643:
1644: /*
1645: * Return the number of significant bits of a big number.
1646: */
1647: int
1648: noct_ksigbits(cr)
1649: struct crparam *cr;
1650: {
1651: u_int plen = (cr->crp_nbits + 7) / 8;
1652: int i, sig = plen * 8;
1653: u_int8_t c, *p = cr->crp_p;
1654:
1655: for (i = plen - 1; i >= 0; i--) {
1656: c = p[i];
1657: if (c != 0) {
1658: while ((c & 0x80) == 0) {
1659: sig--;
1660: c <<= 1;
1661: }
1662: break;
1663: }
1664: sig -= 8;
1665: }
1666: return (sig);
1667: }
1668:
int
noct_kload(sc, cr, wp)
	struct noct_softc *sc;
	struct crparam *cr;
	u_int32_t wp;
{
	u_int64_t adr;
	union noct_pkh_cmd *cmd;
	u_long off;
	int bits, digits, i;
	u_int32_t wpnext;

	/*
	 * Build a PKH LOAD command in queue slot wp that loads big
	 * number cr into freshly allocated big-number-cache space.  The
	 * caller advances the queue pointer; cleanup on completion is
	 * handled by noct_kload_cb().  Returns 0 on success or an errno.
	 */

	/* Fail if advancing wp would collide with the read pointer. */
	wpnext = wp + 1;
	if (wpnext == NOCT_PKH_ENTRIES)
		wpnext = 0;
	if (wpnext == sc->sc_pkhrp)
		return (ENOMEM);

	bits = noct_ksigbits(cr);
	if (bits > 4096)
		return (E2BIG);

	/* One "digit" is a 128-bit (16 byte) unit in the BN cache. */
	digits = (bits + 127) / 128;

	if (extent_alloc(sc->sc_pkh_bn, digits, EX_NOALIGN, 0, EX_NOBOUNDARY,
	    EX_NOWAIT, &off))
		return (ENOMEM);

	cmd = &sc->sc_pkhcmd[wp];
	cmd->cache.op = htole32(PKH_OP_CODE_LOAD);
	cmd->cache.r = htole32(off);
	adr = sc->sc_bnmap->dm_segs[0].ds_addr + (off * 16);
	cmd->cache.addrhi = htole32((adr >> 32) & 0xffffffff);
	cmd->cache.addrlo = htole32((adr >> 0 ) & 0xffffffff);
	/*
	 * NOTE(review): len is set to a byte count here, but the LOAD
	 * commands built inline in noct_kprocess_modexp() set len to the
	 * digit count -- one of the two is likely wrong; confirm against
	 * the NSP2000 PKH command documentation.
	 */
	cmd->cache.len = htole32(digits * 16);
	cmd->cache.unused[0] = cmd->cache.unused[1] = cmd->cache.unused[2] = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_pkhmap,
	    wp * sizeof(union noct_pkh_cmd), sizeof(union noct_pkh_cmd),
	    BUS_DMASYNC_PREWRITE);

	/* Zero pad, then store the number least significant byte last. */
	for (i = 0; i < (digits * 16); i++)
		sc->sc_bncache[(off * 16) + i] = 0;
	for (i = 0; i < ((bits + 7) / 8); i++)
		sc->sc_bncache[(off * 16) + (digits * 16) - 1 - i] =
		    cr->crp_p[i];
	bus_dmamap_sync(sc->sc_dmat, sc->sc_bnmap, off * 16, digits * 16,
	    BUS_DMASYNC_PREWRITE);

	sc->sc_pkh_bnsw[wp].bn_off = off;
	sc->sc_pkh_bnsw[wp].bn_siz = digits;
	sc->sc_pkh_bnsw[wp].bn_callback = noct_kload_cb;
	return (0);
}
1722:
1723: void
1724: noct_kload_cb(sc, wp, err)
1725: struct noct_softc *sc;
1726: u_int32_t wp;
1727: int err;
1728: {
1729: struct noct_bnc_sw *sw = &sc->sc_pkh_bnsw[wp];
1730:
1731: extent_free(sc->sc_pkh_bn, sw->bn_off, sw->bn_siz, EX_NOWAIT);
1732: bzero(&sc->sc_bncache[sw->bn_off * 16], sw->bn_siz * 16);
1733: }
1734:
void
noct_modmul_cb(sc, wp, err)
	struct noct_softc *sc;
	u_int32_t wp;
	int err;
{
	struct noct_bnc_sw *sw = &sc->sc_pkh_bnsw[wp];
	struct cryptkop *krp = sw->bn_krp;
	int i, j;

	/*
	 * Completion handler for a modular multiply: copy the result
	 * out of the big number cache into krp_param[3], reversing byte
	 * order (the cache stores the least significant byte last),
	 * release the cache space, and report the operation done.
	 */
	if (err)
		goto out;

	/* i starts at the last (least significant) byte of the result. */
	i = (sw->bn_off * 16) + (sw->bn_siz * 16) - 1;
	/*
	 * NOTE(review): if param[3].crp_nbits / 8 exceeds bn_siz * 16,
	 * i walks below the start of this result's cache area -- confirm
	 * that callers bound the result size.
	 */
	for (j = 0; j < (krp->krp_param[3].crp_nbits + 7) / 8; j++) {
		krp->krp_param[3].crp_p[j] = sc->sc_bncache[i];
		i--;
	}

out:
	/* Free and scrub the cache space whether or not we succeeded. */
	extent_free(sc->sc_pkh_bn, sw->bn_off, sw->bn_siz, EX_NOWAIT);
	bzero(&sc->sc_bncache[sw->bn_off * 16], sw->bn_siz * 16);
	krp->krp_status = err;
	crypto_kdone(krp);
}
1760:
/*
 * noct_odd_parity[b] is b with its low (parity) bit adjusted so the
 * byte has odd parity.  A DES key byte k is correctly parity-adjusted
 * iff k == noct_odd_parity[k]; noct_newsession() uses this to reject
 * keys with bad parity.
 */
static const u_int8_t noct_odd_parity[] = {
	0x01, 0x01, 0x02, 0x02, 0x04, 0x04, 0x07, 0x07,
	0x08, 0x08, 0x0b, 0x0b, 0x0d, 0x0d, 0x0e, 0x0e,
	0x10, 0x10, 0x13, 0x13, 0x15, 0x15, 0x16, 0x16,
	0x19, 0x19, 0x1a, 0x1a, 0x1c, 0x1c, 0x1f, 0x1f,
	0x20, 0x20, 0x23, 0x23, 0x25, 0x25, 0x26, 0x26,
	0x29, 0x29, 0x2a, 0x2a, 0x2c, 0x2c, 0x2f, 0x2f,
	0x31, 0x31, 0x32, 0x32, 0x34, 0x34, 0x37, 0x37,
	0x38, 0x38, 0x3b, 0x3b, 0x3d, 0x3d, 0x3e, 0x3e,
	0x40, 0x40, 0x43, 0x43, 0x45, 0x45, 0x46, 0x46,
	0x49, 0x49, 0x4a, 0x4a, 0x4c, 0x4c, 0x4f, 0x4f,
	0x51, 0x51, 0x52, 0x52, 0x54, 0x54, 0x57, 0x57,
	0x58, 0x58, 0x5b, 0x5b, 0x5d, 0x5d, 0x5e, 0x5e,
	0x61, 0x61, 0x62, 0x62, 0x64, 0x64, 0x67, 0x67,
	0x68, 0x68, 0x6b, 0x6b, 0x6d, 0x6d, 0x6e, 0x6e,
	0x70, 0x70, 0x73, 0x73, 0x75, 0x75, 0x76, 0x76,
	0x79, 0x79, 0x7a, 0x7a, 0x7c, 0x7c, 0x7f, 0x7f,
	0x80, 0x80, 0x83, 0x83, 0x85, 0x85, 0x86, 0x86,
	0x89, 0x89, 0x8a, 0x8a, 0x8c, 0x8c, 0x8f, 0x8f,
	0x91, 0x91, 0x92, 0x92, 0x94, 0x94, 0x97, 0x97,
	0x98, 0x98, 0x9b, 0x9b, 0x9d, 0x9d, 0x9e, 0x9e,
	0xa1, 0xa1, 0xa2, 0xa2, 0xa4, 0xa4, 0xa7, 0xa7,
	0xa8, 0xa8, 0xab, 0xab, 0xad, 0xad, 0xae, 0xae,
	0xb0, 0xb0, 0xb3, 0xb3, 0xb5, 0xb5, 0xb6, 0xb6,
	0xb9, 0xb9, 0xba, 0xba, 0xbc, 0xbc, 0xbf, 0xbf,
	0xc1, 0xc1, 0xc2, 0xc2, 0xc4, 0xc4, 0xc7, 0xc7,
	0xc8, 0xc8, 0xcb, 0xcb, 0xcd, 0xcd, 0xce, 0xce,
	0xd0, 0xd0, 0xd3, 0xd3, 0xd5, 0xd5, 0xd6, 0xd6,
	0xd9, 0xd9, 0xda, 0xda, 0xdc, 0xdc, 0xdf, 0xdf,
	0xe0, 0xe0, 0xe3, 0xe3, 0xe5, 0xe5, 0xe6, 0xe6,
	0xe9, 0xe9, 0xea, 0xea, 0xec, 0xec, 0xef, 0xef,
	0xf1, 0xf1, 0xf2, 0xf2, 0xf4, 0xf4, 0xf7, 0xf7,
	0xf8, 0xf8, 0xfb, 0xfb, 0xfd, 0xfd, 0xfe, 0xfe,
};
1795:
1796: int
1797: noct_newsession(sidp, cri)
1798: u_int32_t *sidp;
1799: struct cryptoini *cri;
1800: {
1801: struct noct_softc *sc;
1802: int i;
1803:
1804: for (i = 0; i < noct_cd.cd_ndevs; i++) {
1805: sc = noct_cd.cd_devs[i];
1806: if (sc == NULL || sc->sc_cid == (*sidp))
1807: break;
1808: }
1809: if (sc == NULL)
1810: return (EINVAL);
1811:
1812: /* XXX Can only handle single operations */
1813: if (cri->cri_next != NULL)
1814: return (EINVAL);
1815:
1816: if (cri->cri_alg == CRYPTO_DES_CBC || cri->cri_alg == CRYPTO_3DES_CBC) {
1817: u_int8_t key[24];
1818:
1819: if (cri->cri_alg == CRYPTO_DES_CBC) {
1820: if (cri->cri_klen != 64)
1821: return (EINVAL);
1822: for (i = 0; i < 8; i++)
1823: key[i] = key[i + 8] = key[i + 16] =
1824: cri->cri_key[i];
1825: } else {
1826: if (cri->cri_klen != 192)
1827: return (EINVAL);
1828: for (i = 0; i < 24; i++)
1829: key[i] = cri->cri_key[i];
1830: }
1831:
1832: /* Verify key parity */
1833: for (i = 0; i < 24; i++)
1834: if (key[i] != noct_odd_parity[key[i]])
1835: return (ENOEXEC);
1836: }
1837:
1838: *sidp = NOCT_SID(sc->sc_dv.dv_unit, 0);
1839: return (0);
1840: }
1841:
1842: int
1843: noct_freesession(tid)
1844: u_int64_t tid;
1845: {
1846: int card;
1847: u_int32_t sid = ((u_int32_t)tid) & 0xffffffff;
1848:
1849: card = NOCT_CARD(sid);
1850: if (card >= noct_cd.cd_ndevs || noct_cd.cd_devs[card] == NULL)
1851: return (EINVAL);
1852: return (0);
1853: }
1854:
1855: int
1856: noct_process(crp)
1857: struct cryptop *crp;
1858: {
1859: struct noct_softc *sc;
1860: struct noct_workq *q = NULL;
1861: int card, err, s;
1862:
1863: if (crp == NULL || crp->crp_callback == NULL)
1864: return (EINVAL);
1865:
1866: card = NOCT_CARD(crp->crp_sid);
1867: if (card >= noct_cd.cd_ndevs || noct_cd.cd_devs[card] == NULL)
1868: return (EINVAL);
1869: sc = noct_cd.cd_devs[card];
1870:
1871: q = (struct noct_workq *)malloc(sizeof(struct noct_workq),
1872: M_DEVBUF, M_NOWAIT);
1873: if (q == NULL) {
1874: err = ENOMEM;
1875: goto errout;
1876: }
1877: q->q_crp = crp;
1878:
1879: s = splnet();
1880: SIMPLEQ_INSERT_TAIL(&sc->sc_inq, q, q_next);
1881: splx(s);
1882: NOCT_WAKEUP(sc);
1883: return (0);
1884:
1885: errout:
1886: if (q != NULL)
1887: free(q, M_DEVBUF);
1888: crp->crp_etype = err;
1889: crypto_done(crp);
1890: return (0);
1891: }
CVSweb