2 * Linux port done by David McCullough <david_mccullough@mcafee.com>
3 * Copyright (C) 2004-2010 David McCullough
4 * The license and original author are listed below.
6 * Copyright (c) 2003 Sam Leffler, Errno Consulting
7 * Copyright (c) 2003 Global Technology Associates, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 __FBSDID("$FreeBSD: src/sys/dev/safe/safe.c,v 1.18 2007/03/21 03:42:50 sam Exp $");
34 #include <linux/version.h>
35 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
36 #include <linux/config.h>
38 #include <linux/module.h>
39 #include <linux/kernel.h>
40 #include <linux/init.h>
41 #include <linux/list.h>
42 #include <linux/slab.h>
43 #include <linux/wait.h>
44 #include <linux/sched.h>
45 #include <linux/pci.h>
46 #include <linux/delay.h>
47 #include <linux/interrupt.h>
48 #include <linux/spinlock.h>
49 #include <linux/random.h>
50 #include <linux/skbuff.h>
54 * SafeNet SafeXcel-1141 hardware crypto accelerator
57 #include <cryptodev.h>
59 #include <safe/safereg.h>
60 #include <safe/safevar.h>
/* Debug print helper: prefixes output with the device name when the softc
 * has an attached device, otherwise the literal "safe".  (Listing is
 * sampled; the macro body is only partially visible here.) */
63 #define DPRINTF(a) do { \
66 device_get_nameunit(sc->sc_dev) : "safe"); \
75 * until we find a cleaner way, include the BSD md5/sha1 code
/* HMAC_HACK path: pull in local BSD-derived MD5/SHA1 implementations
 * (sha1.c is included directly as source, not linked separately). */
80 #include <safe/hmachack.h>
83 #include <safe/sha1.h>
84 #include <safe/sha1.c>
85 #endif /* HMAC_HACK */
/* Driver-wide statistics (st_* counters bumped throughout the data path)
 * and tunable module parameters for debug and RNG polling behaviour. */
87 /* add proc entry for this */
88 struct safe_stats safestats;
90 #define debug safe_debug
92 module_param(safe_debug, int, 0644);
93 MODULE_PARM_DESC(safe_debug, "Enable debug");
95 static void safe_callback(struct safe_softc *, struct safe_ringentry *);
96 static void safe_feed(struct safe_softc *, struct safe_ringentry *);
/* RNG harvesting support is compiled in only when OCF random harvesting
 * is enabled and the chip variant supports an RNG. */
97 #if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
98 static void safe_rng_init(struct safe_softc *);
99 int safe_rngbufsize = 8; /* 32 bytes each read */
100 module_param(safe_rngbufsize, int, 0644);
101 MODULE_PARM_DESC(safe_rngbufsize, "RNG polling buffer size (32-bit words)");
102 int safe_rngmaxalarm = 8; /* max alarms before reset */
103 module_param(safe_rngmaxalarm, int, 0644);
104 MODULE_PARM_DESC(safe_rngmaxalarm, "RNG max alarms before reset");
105 #endif /* SAFE_NO_RNG */
/* Forward declarations: reset/DMA helpers, the asymmetric (k*) path, and
 * the three OCF symmetric entry points registered in safe_methods. */
107 static void safe_totalreset(struct safe_softc *sc);
108 static int safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op);
109 static int safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op);
110 static int safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re);
111 static int safe_kprocess(device_t dev, struct cryptkop *krp, int hint);
112 static int safe_kstart(struct safe_softc *sc);
113 static int safe_ksigbits(struct safe_softc *sc, struct crparam *cr);
114 static void safe_kfeed(struct safe_softc *sc);
115 static void safe_kpoll(unsigned long arg);
116 static void safe_kload_reg(struct safe_softc *sc, u_int32_t off,
117 u_int32_t len, struct crparam *n);
119 static int safe_newsession(device_t, u_int32_t *, struct cryptoini *);
120 static int safe_freesession(device_t, u_int64_t);
121 static int safe_process(device_t, struct cryptop *, int);
/* OCF device-method table: binds this driver's session management and
 * request-processing entry points into the cryptodev framework. */
123 static device_method_t safe_methods = {
124 /* crypto device methods */
125 DEVMETHOD(cryptodev_newsession, safe_newsession),
126 DEVMETHOD(cryptodev_freesession,safe_freesession),
127 DEVMETHOD(cryptodev_process, safe_process),
128 DEVMETHOD(cryptodev_kprocess, safe_kprocess),
/* MMIO accessors: register offsets are relative to the mapped BAR. */
131 #define READ_REG(sc,r) readl((sc)->sc_base_addr + (r))
132 #define WRITE_REG(sc,r,val) writel((val), (sc)->sc_base_addr + (r))
/* Table of probed chip instances, indexed by unit number. */
134 #define SAFE_MAX_CHIPS 8
135 static struct safe_softc *safe_chip_idx[SAFE_MAX_CHIPS];
138 * split our buffers up into safe DMAable byte fragments to avoid lockup
139 * bug in 1141 HW on rev 1.0.
/* Maps one contiguous buffer with pci_map_single() and records it as one
 * or more segments of at most sc_max_dsize bytes.  ds_tlen carries the
 * full mapped length so pci_unmap_operand() can unmap the whole mapping
 * from the first segment of each chunk. */
144 struct safe_softc *sc,
145 struct safe_operand *buf,
150 int chunk, tlen = len;
152 tmp = pci_map_single(sc->sc_pcidev, addr, len, PCI_DMA_BIDIRECTIONAL);
156 chunk = (len > sc->sc_max_dsize) ? sc->sc_max_dsize : len;
157 buf->segs[buf->nsegs].ds_addr = tmp;
158 buf->segs[buf->nsegs].ds_len = chunk;
159 buf->segs[buf->nsegs].ds_tlen = tlen;
169 * map in a given uio buffer (great on some arches :-)
/* Maps every iovec of the uio via pci_map_linear(); segments accumulate
 * in buf->segs.  NOTE(review): the iov advance is elided in this sampled
 * listing -- presumably iov++ per iteration; confirm against full source. */
173 pci_map_uio(struct safe_softc *sc, struct safe_operand *buf, struct uio *uio)
175 struct iovec *iov = uio->uio_iov;
178 DPRINTF(("%s()\n", __FUNCTION__));
183 for (n = 0; n < uio->uio_iovcnt; n++) {
184 pci_map_linear(sc, buf, iov->iov_base, iov->iov_len);
188 /* identify this buffer by the first segment */
189 buf->map = (void *) buf->segs[0].ds_addr;
194 * map in a given sk_buff
/* Maps the skb linear head first, then each paged fragment, all through
 * pci_map_linear() so the sc_max_dsize segment split applies uniformly. */
198 pci_map_skb(struct safe_softc *sc,struct safe_operand *buf,struct sk_buff *skb)
202 DPRINTF(("%s()\n", __FUNCTION__));
207 pci_map_linear(sc, buf, skb->data, skb_headlen(skb));
209 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
210 pci_map_linear(sc, buf,
211 page_address(skb_shinfo(skb)->frags[i].page) +
212 skb_shinfo(skb)->frags[i].page_offset,
213 skb_shinfo(skb)->frags[i].size);
216 /* identify this buffer by the first segment */
217 buf->map = (void *) buf->segs[0].ds_addr;
/* CPU-sync helper for a mapped operand; currently compiled out (#if 0)
 * and kept only for reference. */
222 #if 0 /* not needed at this time */
224 pci_sync_operand(struct safe_softc *sc, struct safe_operand *buf)
228 DPRINTF(("%s()\n", __FUNCTION__));
229 for (i = 0; i < buf->nsegs; i++)
230 pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
231 buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
/* Releases all DMA mappings of an operand.  Only segments with a nonzero
 * ds_tlen were the head of a pci_map_single() mapping (see
 * pci_map_linear), so only those are unmapped; all bookkeeping fields
 * are then cleared for reuse. */
236 pci_unmap_operand(struct safe_softc *sc, struct safe_operand *buf)
239 DPRINTF(("%s()\n", __FUNCTION__));
240 for (i = 0; i < buf->nsegs; i++) {
241 if (buf->segs[i].ds_tlen) {
242 DPRINTF(("%s - unmap %d 0x%x %d\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
243 pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
244 buf->segs[i].ds_tlen, PCI_DMA_BIDIRECTIONAL);
245 DPRINTF(("%s - unmap %d 0x%x %d done\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
247 buf->segs[i].ds_addr = 0;
248 buf->segs[i].ds_len = 0;
249 buf->segs[i].ds_tlen = 0;
258 * SafeXcel Interrupt routine
/* IRQ handler.  Signature differs across kernel versions: pt_regs was
 * dropped from irq handlers in 2.6.19. */
261 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
262 safe_intr(int irq, void *arg)
264 safe_intr(int irq, void *arg, struct pt_regs *regs)
267 struct safe_softc *sc = arg;
271 stat = READ_REG(sc, SAFE_HM_STAT);
273 DPRINTF(("%s(stat=0x%x)\n", __FUNCTION__, stat));
275 if (stat == 0) /* shared irq, not for us */
/* Acknowledge everything we saw before processing. */
278 WRITE_REG(sc, SAFE_HI_CLR, stat); /* IACK */
280 if ((stat & SAFE_INT_PE_DDONE)) {
282 * Descriptor(s) done; scan the ring and
283 * process completed operations.
285 spin_lock_irqsave(&sc->sc_ringmtx, flags);
286 while (sc->sc_back != sc->sc_front) {
287 struct safe_ringentry *re = sc->sc_back;
291 safe_dump_ringstate(sc, __func__);
292 safe_dump_request(sc, __func__, re);
296 * safe_process marks ring entries that were allocated
297 * but not used with a csr of zero. This ensures the
298 * ring front pointer never needs to be set backwards
299 * in the event that an entry is allocated but not used
300 * because of a setup error.
302 DPRINTF(("%s re->re_desc.d_csr=0x%x\n", __FUNCTION__, re->re_desc.d_csr));
303 if (re->re_desc.d_csr != 0) {
/* Stop draining at the first descriptor the engine has not finished:
 * both the CSR and LEN done bits must be set before completion. */
304 if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr)) {
305 DPRINTF(("%s !CSR_IS_DONE\n", __FUNCTION__));
308 if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len)) {
309 DPRINTF(("%s !LEN_IS_DONE\n", __FUNCTION__));
313 safe_callback(sc, re);
/* Advance the consumer pointer, wrapping at the ring top. */
315 if (++(sc->sc_back) == sc->sc_ringtop)
316 sc->sc_back = sc->sc_ring;
318 spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
322 * Check to see if we got any DMA Error
324 if (stat & SAFE_INT_PE_ERROR) {
325 printk("%s: dmaerr dmastat %08x\n", device_get_nameunit(sc->sc_dev),
326 (int)READ_REG(sc, SAFE_PE_DMASTAT));
327 safestats.st_dmaerr++;
/* Unblock queued OCF requests that were deferred while the ring was full. */
334 if (sc->sc_needwakeup) { /* XXX check high watermark */
335 int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
336 DPRINTF(("%s: wakeup crypto %x\n", __func__,
338 sc->sc_needwakeup &= ~wakeup;
339 crypto_unblock(sc->sc_cid, wakeup);
346 * safe_feed() - post a request to chip
/* Caller is expected to hold sc_ringmtx (safe_process submits under it). */
349 safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
351 DPRINTF(("%s()\n", __FUNCTION__));
354 safe_dump_ringstate(sc, __func__);
355 safe_dump_request(sc, __func__, re);
/* Track the high-water mark of requests queued to the chip. */
359 if (sc->sc_nqchip > safestats.st_maxqchip)
360 safestats.st_maxqchip = sc->sc_nqchip;
361 /* poke h/w to check descriptor ring, any value can be written */
362 WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
/* Array-element-count helper (only valid on true arrays). */
365 #define N(a) (sizeof(a) / sizeof (a[0]))
/* Copies the cipher key into the session (ses_klen is in bits, hence the
 * /8) and byte-swaps each 32-bit word for the little-endian packet engine. */
367 safe_setup_enckey(struct safe_session *ses, caddr_t key)
371 bcopy(key, ses->ses_key, ses->ses_klen / 8);
373 /* PE is little-endian, insure proper byte order */
374 for (i = 0; i < N(ses->ses_key); i++)
375 ses->ses_key[i] = htole32(ses->ses_key[i]);
/* Precomputes the HMAC inner/outer digest states for MD5 or SHA1:
 * hash(key ^ ipad) -> ses_hminner, hash(key ^ opad) -> ses_hmouter,
 * so the chip can resume from these intermediates per request.  The key
 * buffer is XOR-toggled in place and restored to its original bytes by
 * the final loop.  Only available with HMAC_HACK (local md5/sha1 code). */
379 safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen)
387 for (i = 0; i < klen; i++)
388 key[i] ^= HMAC_IPAD_VAL;
390 if (algo == CRYPTO_MD5_HMAC) {
392 MD5Update(&md5ctx, key, klen);
393 MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
394 bcopy(md5ctx.md5_st8, ses->ses_hminner, sizeof(md5ctx.md5_st8));
397 SHA1Update(&sha1ctx, key, klen);
398 SHA1Update(&sha1ctx, hmac_ipad_buffer,
399 SHA1_HMAC_BLOCK_LEN - klen);
400 bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
/* Flip the pad from ipad to opad in one pass (ipad^opad cancels ipad). */
403 for (i = 0; i < klen; i++)
404 key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
406 if (algo == CRYPTO_MD5_HMAC) {
408 MD5Update(&md5ctx, key, klen);
409 MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
410 bcopy(md5ctx.md5_st8, ses->ses_hmouter, sizeof(md5ctx.md5_st8));
413 SHA1Update(&sha1ctx, key, klen);
414 SHA1Update(&sha1ctx, hmac_opad_buffer,
415 SHA1_HMAC_BLOCK_LEN - klen);
416 bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
/* Restore the caller's key bytes. */
419 for (i = 0; i < klen; i++)
420 key[i] ^= HMAC_OPAD_VAL;
424 * this code prevents SHA working on a BE host,
425 * so it is obviously wrong. I think the byte
426 * swap setup we do with the chip fixes this for us
429 /* PE is little-endian, insure proper byte order */
430 for (i = 0; i < N(ses->ses_hminner); i++) {
431 ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
432 ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
435 #else /* HMAC_HACK */
436 printk("safe: md5/sha not implemented\n");
437 #endif /* HMAC_HACK */
442 * Allocate a new 'session' and return an encoded session id. 'sidp'
443 * contains our registration id, and should contain an encoded session
444 * id on successful allocation.
/* Accepts at most one hash descriptor and one cipher descriptor from the
 * cryptoini chain; validates cipher key lengths (in bits), then finds or
 * grows the session table and initializes the session keys/state. */
447 safe_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
449 struct safe_softc *sc = device_get_softc(dev);
450 struct cryptoini *c, *encini = NULL, *macini = NULL;
451 struct safe_session *ses = NULL;
454 DPRINTF(("%s()\n", __FUNCTION__));
456 if (sidp == NULL || cri == NULL || sc == NULL)
459 for (c = cri; c != NULL; c = c->cri_next) {
460 if (c->cri_alg == CRYPTO_MD5_HMAC ||
461 c->cri_alg == CRYPTO_SHA1_HMAC ||
462 c->cri_alg == CRYPTO_NULL_HMAC) {
466 } else if (c->cri_alg == CRYPTO_DES_CBC ||
467 c->cri_alg == CRYPTO_3DES_CBC ||
468 c->cri_alg == CRYPTO_AES_CBC ||
469 c->cri_alg == CRYPTO_NULL_CBC) {
476 if (encini == NULL && macini == NULL)
478 if (encini) { /* validate key length */
479 switch (encini->cri_alg) {
481 if (encini->cri_klen != 64)
484 case CRYPTO_3DES_CBC:
485 if (encini->cri_klen != 192)
489 if (encini->cri_klen != 128 &&
490 encini->cri_klen != 192 &&
491 encini->cri_klen != 256)
/* First session: allocate a one-entry table. */
497 if (sc->sc_sessions == NULL) {
498 ses = sc->sc_sessions = (struct safe_session *)
499 kmalloc(sizeof(struct safe_session), SLAB_ATOMIC);
502 memset(ses, 0, sizeof(struct safe_session));
504 sc->sc_nsessions = 1;
/* Otherwise reuse a free slot if one exists... */
506 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
507 if (sc->sc_sessions[sesn].ses_used == 0) {
508 ses = &sc->sc_sessions[sesn];
/* ...or grow the table by one, copying and scrubbing the old one. */
514 sesn = sc->sc_nsessions;
515 ses = (struct safe_session *)
516 kmalloc((sesn + 1) * sizeof(struct safe_session), SLAB_ATOMIC);
519 memset(ses, 0, (sesn + 1) * sizeof(struct safe_session));
520 bcopy(sc->sc_sessions, ses, sesn *
521 sizeof(struct safe_session));
/* Zeroize the old table before freeing: it holds key material. */
522 bzero(sc->sc_sessions, sesn *
523 sizeof(struct safe_session));
524 kfree(sc->sc_sessions);
525 sc->sc_sessions = ses;
526 ses = &sc->sc_sessions[sesn];
531 bzero(ses, sizeof(struct safe_session));
535 ses->ses_klen = encini->cri_klen;
536 if (encini->cri_key != NULL)
537 safe_setup_enckey(ses, encini->cri_key);
/* Default MAC length per algorithm when the caller did not specify one. */
541 ses->ses_mlen = macini->cri_mlen;
542 if (ses->ses_mlen == 0) {
543 if (macini->cri_alg == CRYPTO_MD5_HMAC)
544 ses->ses_mlen = MD5_HASH_LEN;
546 ses->ses_mlen = SHA1_HASH_LEN;
549 if (macini->cri_key != NULL) {
550 safe_setup_mackey(ses, macini->cri_alg, macini->cri_key,
551 macini->cri_klen / 8);
555 *sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
560 * Deallocate a session.
/* Validates the session index then zeroizes the slot (clears key material
 * and marks it unused for reuse by safe_newsession). */
563 safe_freesession(device_t dev, u_int64_t tid)
565 struct safe_softc *sc = device_get_softc(dev);
567 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
569 DPRINTF(("%s()\n", __FUNCTION__));
574 session = SAFE_SESSION(sid);
575 if (session < sc->sc_nsessions) {
576 bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
/* Main symmetric-crypto submission path.  Validates the request, claims a
 * ring entry under sc_ringmtx, builds the SA command words and packet
 * engine descriptor (cipher, HMAC, IV handling, gather/scatter particle
 * lists), then feeds the descriptor to the chip.  On failure, unmaps any
 * DMA state and either completes the crp with an error or defers with
 * ERESTART when the ring is full. */
585 safe_process(device_t dev, struct cryptop *crp, int hint)
587 struct safe_softc *sc = device_get_softc(dev);
588 int err = 0, i, nicealign, uniform;
589 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
590 int bypass, oplen, ivsize;
593 struct safe_session *ses;
594 struct safe_ringentry *re;
595 struct safe_sarec *sa;
596 struct safe_pdesc *pd;
597 u_int32_t cmd0, cmd1, staterec, rand_iv[4];
600 DPRINTF(("%s()\n", __FUNCTION__));
602 if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
603 safestats.st_invalid++;
606 if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
607 safestats.st_badsession++;
/* Ring full: ask OCF to hold further symmetric requests and retry. */
611 spin_lock_irqsave(&sc->sc_ringmtx, flags);
612 if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
613 safestats.st_ringfull++;
614 sc->sc_needwakeup |= CRYPTO_SYMQ;
615 spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
620 staterec = re->re_sa.sa_staterec; /* save */
621 /* NB: zero everything but the PE descriptor */
622 bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
623 re->re_sa.sa_staterec = staterec; /* restore */
626 re->re_sesn = SAFE_SESSION(crp->crp_sid);
628 re->re_src.nsegs = 0;
629 re->re_dst.nsegs = 0;
/* Only skb and uio buffers are supported; src and dst start identical. */
631 if (crp->crp_flags & CRYPTO_F_SKBUF) {
632 re->re_src_skb = (struct sk_buff *)crp->crp_buf;
633 re->re_dst_skb = (struct sk_buff *)crp->crp_buf;
634 } else if (crp->crp_flags & CRYPTO_F_IOV) {
635 re->re_src_io = (struct uio *)crp->crp_buf;
636 re->re_dst_io = (struct uio *)crp->crp_buf;
638 safestats.st_badflags++;
640 goto errout; /* XXX we don't handle contiguous blocks! */
644 ses = &sc->sc_sessions[re->re_sesn];
646 crd1 = crp->crp_desc;
648 safestats.st_nodesc++;
652 crd2 = crd1->crd_next;
654 cmd0 = SAFE_SA_CMD0_BASIC; /* basic group operation */
/* Single descriptor: pure hash or pure cipher. */
657 if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
658 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
659 crd1->crd_alg == CRYPTO_NULL_HMAC) {
662 cmd0 |= SAFE_SA_CMD0_OP_HASH;
663 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
664 crd1->crd_alg == CRYPTO_3DES_CBC ||
665 crd1->crd_alg == CRYPTO_AES_CBC ||
666 crd1->crd_alg == CRYPTO_NULL_CBC) {
669 cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
671 safestats.st_badalg++;
/* Two descriptors: hash-then-decrypt or encrypt-then-hash only. */
676 if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
677 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
678 crd1->crd_alg == CRYPTO_NULL_HMAC) &&
679 (crd2->crd_alg == CRYPTO_DES_CBC ||
680 crd2->crd_alg == CRYPTO_3DES_CBC ||
681 crd2->crd_alg == CRYPTO_AES_CBC ||
682 crd2->crd_alg == CRYPTO_NULL_CBC) &&
683 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
686 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
687 crd1->crd_alg == CRYPTO_3DES_CBC ||
688 crd1->crd_alg == CRYPTO_AES_CBC ||
689 crd1->crd_alg == CRYPTO_NULL_CBC) &&
690 (crd2->crd_alg == CRYPTO_MD5_HMAC ||
691 crd2->crd_alg == CRYPTO_SHA1_HMAC ||
692 crd2->crd_alg == CRYPTO_NULL_HMAC) &&
693 (crd1->crd_flags & CRD_F_ENCRYPT)) {
697 safestats.st_badalg++;
701 cmd0 |= SAFE_SA_CMD0_OP_BOTH;
/* Cipher setup: select algorithm, CBC mode, key size and IV size. */
705 if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
706 safe_setup_enckey(ses, enccrd->crd_key);
708 if (enccrd->crd_alg == CRYPTO_DES_CBC) {
709 cmd0 |= SAFE_SA_CMD0_DES;
710 cmd1 |= SAFE_SA_CMD1_CBC;
711 ivsize = 2*sizeof(u_int32_t);
712 } else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
713 cmd0 |= SAFE_SA_CMD0_3DES;
714 cmd1 |= SAFE_SA_CMD1_CBC;
715 ivsize = 2*sizeof(u_int32_t);
716 } else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
717 cmd0 |= SAFE_SA_CMD0_AES;
718 cmd1 |= SAFE_SA_CMD1_CBC;
719 if (ses->ses_klen == 128)
720 cmd1 |= SAFE_SA_CMD1_AES128;
721 else if (ses->ses_klen == 192)
722 cmd1 |= SAFE_SA_CMD1_AES192;
724 cmd1 |= SAFE_SA_CMD1_AES256;
725 ivsize = 4*sizeof(u_int32_t);
727 cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
732 * Setup encrypt/decrypt state. When using basic ops
733 * we can't use an inline IV because hash/crypt offset
734 * must be from the end of the IV to the start of the
735 * crypt data and this leaves out the preceding header
736 * from the hash calculation. Instead we place the IV
737 * in the state record and set the hash/crypt offset to
738 * copy both the header+IV.
740 if (enccrd->crd_flags & CRD_F_ENCRYPT) {
741 cmd0 |= SAFE_SA_CMD0_OUTBOUND;
743 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
746 read_random((iv = (caddr_t) &rand_iv[0]), sizeof(rand_iv));
747 if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
748 crypto_copyback(crp->crp_flags, crp->crp_buf,
749 enccrd->crd_inject, ivsize, iv);
751 bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
753 for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
754 re->re_sastate.sa_saved_iv[i] =
755 cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
756 cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
757 re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
759 cmd0 |= SAFE_SA_CMD0_INBOUND;
/* Decrypt: take the IV from the request or from the data stream. */
761 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
762 bcopy(enccrd->crd_iv,
763 re->re_sastate.sa_saved_iv, ivsize);
765 crypto_copydata(crp->crp_flags, crp->crp_buf,
766 enccrd->crd_inject, ivsize,
767 (caddr_t)re->re_sastate.sa_saved_iv);
770 for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
771 re->re_sastate.sa_saved_iv[i] =
772 cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
773 cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
776 * For basic encryption use the zero pad algorithm.
777 * This pads results to an 8-byte boundary and
778 * suppresses padding verification for inbound (i.e.
779 * decrypt) operations.
781 * NB: Not sure if the 8-byte pad boundary is a problem.
783 cmd0 |= SAFE_SA_CMD0_PAD_ZERO;
785 /* XXX assert key bufs have the same size */
786 bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
/* MAC setup: select hash algorithm and load precomputed HMAC states. */
790 if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
791 safe_setup_mackey(ses, maccrd->crd_alg,
792 maccrd->crd_key, maccrd->crd_klen / 8);
795 if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
796 cmd0 |= SAFE_SA_CMD0_MD5;
797 cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */
798 } else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
799 cmd0 |= SAFE_SA_CMD0_SHA1;
800 cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */
802 cmd0 |= SAFE_SA_CMD0_HASH_NULL;
805 * Digest data is loaded from the SA and the hash
806 * result is saved to the state block where we
807 * retrieve it for return to the caller.
809 /* XXX assert digest bufs have the same size */
810 bcopy(ses->ses_hminner, sa->sa_indigest,
811 sizeof(sa->sa_indigest));
812 bcopy(ses->ses_hmouter, sa->sa_outdigest,
813 sizeof(sa->sa_outdigest));
815 cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
816 re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
/* Compute bypass/oplen/coffset layout for the combined hash+crypt case. */
819 if (enccrd && maccrd) {
821 * The offset from hash data to the start of
822 * crypt data is the difference in the skips.
824 bypass = maccrd->crd_skip;
825 coffset = enccrd->crd_skip - maccrd->crd_skip;
827 DPRINTF(("%s: hash does not precede crypt; "
828 "mac skip %u enc skip %u\n",
829 __func__, maccrd->crd_skip, enccrd->crd_skip));
830 safestats.st_skipmismatch++;
834 oplen = enccrd->crd_skip + enccrd->crd_len;
835 if (maccrd->crd_skip + maccrd->crd_len != oplen) {
836 DPRINTF(("%s: hash amount %u != crypt amount %u\n",
837 __func__, maccrd->crd_skip + maccrd->crd_len,
839 safestats.st_lenmismatch++;
845 printf("mac: skip %d, len %d, inject %d\n",
846 maccrd->crd_skip, maccrd->crd_len,
848 printf("enc: skip %d, len %d, inject %d\n",
849 enccrd->crd_skip, enccrd->crd_len,
851 printf("bypass %d coffset %d oplen %d\n",
852 bypass, coffset, oplen);
855 if (coffset & 3) { /* offset must be 32-bit aligned */
856 DPRINTF(("%s: coffset %u misaligned\n",
858 safestats.st_coffmisaligned++;
863 if (coffset > 255) { /* offset must be <256 dwords */
864 DPRINTF(("%s: coffset %u too big\n",
866 safestats.st_cofftoobig++;
871 * Tell the hardware to copy the header to the output.
872 * The header is defined as the data from the end of
873 * the bypass to the start of data to be encrypted.
874 * Typically this is the inline IV. Note that you need
875 * to do this even if src+dst are the same; it appears
876 * that w/o this bit the crypted data is written
877 * immediately after the bypass data.
879 cmd1 |= SAFE_SA_CMD1_HDRCOPY;
881 * Disable IP header mutable bit handling. This is
882 * needed to get correct HMAC calculations.
884 cmd1 |= SAFE_SA_CMD1_MUTABLE;
887 bypass = enccrd->crd_skip;
888 oplen = bypass + enccrd->crd_len;
890 bypass = maccrd->crd_skip;
891 oplen = bypass + maccrd->crd_len;
895 /* XXX verify multiple of 4 when using s/g */
896 if (bypass > 96) { /* bypass offset must be <= 96 bytes */
897 DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
898 safestats.st_bypasstoobig++;
/* Map the source buffer for DMA. */
903 if (crp->crp_flags & CRYPTO_F_SKBUF) {
904 if (pci_map_skb(sc, &re->re_src, re->re_src_skb)) {
905 safestats.st_noload++;
909 } else if (crp->crp_flags & CRYPTO_F_IOV) {
910 if (pci_map_uio(sc, &re->re_src, re->re_src_io)) {
911 safestats.st_noload++;
916 nicealign = safe_dmamap_aligned(sc, &re->re_src);
917 uniform = safe_dmamap_uniform(sc, &re->re_src);
919 DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
920 nicealign, uniform, re->re_src.nsegs));
/* Multi-segment source: build a gather particle list on the s/p ring. */
921 if (re->re_src.nsegs > 1) {
922 re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
923 ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
924 for (i = 0; i < re->re_src_nsegs; i++) {
925 /* NB: no need to check if there's space */
927 if (++(sc->sc_spfree) == sc->sc_springtop)
928 sc->sc_spfree = sc->sc_spring;
930 KASSERT((pd->pd_flags&3) == 0 ||
931 (pd->pd_flags&3) == SAFE_PD_DONE,
932 ("bogus source particle descriptor; flags %x",
934 pd->pd_addr = re->re_src_segs[i].ds_addr;
935 pd->pd_size = re->re_src_segs[i].ds_len;
936 pd->pd_flags = SAFE_PD_READY;
938 cmd0 |= SAFE_SA_CMD0_IGATHER;
941 * No need for gather, reference the operand directly.
943 re->re_desc.d_src = re->re_src_segs[0].ds_addr;
/* Destination setup: hash-only needs none; otherwise dst aliases src. */
946 if (enccrd == NULL && maccrd != NULL) {
948 * Hash op; no destination needed.
951 if (crp->crp_flags & (CRYPTO_F_IOV|CRYPTO_F_SKBUF)) {
953 safestats.st_iovmisaligned++;
958 device_printf(sc->sc_dev, "!uniform source\n");
961 * There's no way to handle the DMA
962 * requirements with this uio. We
963 * could create a separate DMA area for
964 * the result and then copy it back,
965 * but for now we just bail and return
966 * an error. Note that uio requests
967 * > SAFE_MAX_DSIZE are handled because
968 * the DMA map and segment list for the
969 * destination will result in a
970 * destination particle list that does
971 * the necessary scatter DMA.
973 safestats.st_iovnotuniform++;
978 re->re_dst = re->re_src;
980 safestats.st_badflags++;
/* Multi-segment destination: build a scatter particle list. */
985 if (re->re_dst.nsegs > 1) {
986 re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
987 ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
988 for (i = 0; i < re->re_dst_nsegs; i++) {
990 KASSERT((pd->pd_flags&3) == 0 ||
991 (pd->pd_flags&3) == SAFE_PD_DONE,
992 ("bogus dest particle descriptor; flags %x",
994 if (++(sc->sc_dpfree) == sc->sc_dpringtop)
995 sc->sc_dpfree = sc->sc_dpring;
996 pd->pd_addr = re->re_dst_segs[i].ds_addr;
997 pd->pd_flags = SAFE_PD_READY;
999 cmd0 |= SAFE_SA_CMD0_OSCATTER;
1002 * No need for scatter, reference the operand directly.
1004 re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
1009 * All done with setup; fillin the SA command words
1010 * and the packet engine descriptor. The operation
1011 * is now ready for submission to the hardware.
1013 sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
1015 | (coffset << SAFE_SA_CMD1_OFFSET_S)
1016 | SAFE_SA_CMD1_SAREV1 /* Rev 1 SA data structure */
1017 | SAFE_SA_CMD1_SRPCI
1020 * NB: the order of writes is important here. In case the
1021 * chip is scanning the ring because of an outstanding request
1022 * it might nab this one too. In that case we need to make
1023 * sure the setup is complete before we write the length
1024 * field of the descriptor as it signals the descriptor is
1025 * ready for processing.
1027 re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
1029 re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
1031 re->re_desc.d_len = oplen
1033 | (bypass << SAFE_PE_LEN_BYPASS_S)
1036 safestats.st_ipackets++;
1037 safestats.st_ibytes += oplen;
/* Advance the producer pointer (wraps) and submit. */
1039 if (++(sc->sc_front) == sc->sc_ringtop)
1040 sc->sc_front = sc->sc_ring;
1042 /* XXX honor batching */
1044 spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
/* Error path: tear down any DMA mappings before reporting. */
1048 if (re->re_src.map != re->re_dst.map)
1049 pci_unmap_operand(sc, &re->re_dst);
1051 pci_unmap_operand(sc, &re->re_src);
1052 spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
1053 if (err != ERESTART) {
1054 crp->crp_etype = err;
1057 sc->sc_needwakeup |= CRYPTO_SYMQ;
/* Completion handler, called from safe_intr() for each finished ring
 * entry: records stats, checks the PE status bits, unmaps DMA, copies
 * the saved ICV back into the caller's buffer (byte-swapping SHA-1
 * output), and completes the cryptop. */
1063 safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
1065 struct cryptop *crp = (struct cryptop *)re->re_crp;
1066 struct cryptodesc *crd;
1068 DPRINTF(("%s()\n", __FUNCTION__));
1070 safestats.st_opackets++;
1071 safestats.st_obytes += re->re_dst.mapsize;
1073 if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
1074 device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
1076 re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
1077 safestats.st_peoperr++;
1078 crp->crp_etype = EIO; /* something more meaningful? */
1081 if (re->re_dst.map != NULL && re->re_dst.map != re->re_src.map)
1082 pci_unmap_operand(sc, &re->re_dst);
1083 pci_unmap_operand(sc, &re->re_src);
1086 * If result was written to a different mbuf chain, swap
1087 * it in as the return value and reclaim the original.
1089 if ((crp->crp_flags & CRYPTO_F_SKBUF) && re->re_src_skb != re->re_dst_skb) {
1090 device_printf(sc->sc_dev, "no CRYPTO_F_SKBUF swapping support\n");
1091 /* kfree_skb(skb) */
1092 /* crp->crp_buf = (caddr_t)re->re_dst_skb */
1096 if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
1097 /* copy out ICV result */
1098 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1099 if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
1100 crd->crd_alg == CRYPTO_SHA1_HMAC ||
1101 crd->crd_alg == CRYPTO_NULL_HMAC))
1103 if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
1105 * SHA-1 ICV's are byte-swapped; fix 'em up
1106 * before copy them to their destination.
1108 re->re_sastate.sa_saved_indigest[0] =
1109 cpu_to_be32(re->re_sastate.sa_saved_indigest[0]);
1110 re->re_sastate.sa_saved_indigest[1] =
1111 cpu_to_be32(re->re_sastate.sa_saved_indigest[1]);
1112 re->re_sastate.sa_saved_indigest[2] =
1113 cpu_to_be32(re->re_sastate.sa_saved_indigest[2]);
1115 re->re_sastate.sa_saved_indigest[0] =
1116 cpu_to_le32(re->re_sastate.sa_saved_indigest[0]);
1117 re->re_sastate.sa_saved_indigest[1] =
1118 cpu_to_le32(re->re_sastate.sa_saved_indigest[1]);
1119 re->re_sastate.sa_saved_indigest[2] =
1120 cpu_to_le32(re->re_sastate.sa_saved_indigest[2]);
1122 crypto_copyback(crp->crp_flags, crp->crp_buf,
1124 sc->sc_sessions[re->re_sesn].ses_mlen,
1125 (caddr_t)re->re_sastate.sa_saved_indigest);
1133 #if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
/* Upper bound on register polling iterations while waiting for the RNG. */
1134 #define SAFE_RNG_MAXWAIT 1000
/* Programs the RNG control/config registers with the vendor-recommended
 * values, then works around the rev 1.0 ready-flag bug by polling the
 * output register until its value has changed twice. */
1137 safe_rng_init(struct safe_softc *sc)
1142 DPRINTF(("%s()\n", __FUNCTION__));
1144 WRITE_REG(sc, SAFE_RNG_CTRL, 0);
1145 /* use default value according to the manual */
1146 WRITE_REG(sc, SAFE_RNG_CNFG, 0x834); /* magic from SafeNet */
1147 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
1150 * There is a bug in rev 1.0 of the 1140 that when the RNG
1151 * is brought out of reset the ready status flag does not
1152 * work until the RNG has finished its internal initialization.
1154 * So in order to determine the device is through its
1155 * initialization we must read the data register, using the
1156 * status reg in the read in case it is initialized. Then read
1157 * the data register until it changes from the first read.
1158 * Once it changes read the data register until it changes
1159 * again. At this time the RNG is considered initialized.
1160 * This could take between 750ms - 1000ms in time.
1163 w = READ_REG(sc, SAFE_RNG_OUT);
1165 v = READ_REG(sc, SAFE_RNG_OUT);
1171 } while (++i < SAFE_RNG_MAXWAIT);
1173 /* Wait Until data changes again */
1176 v = READ_REG(sc, SAFE_RNG_OUT);
1180 } while (++i < SAFE_RNG_MAXWAIT);
/* Clears the SHORTEN bit: return the RNG oscillators to normal cycling. */
1183 static __inline void
1184 safe_rng_disable_short_cycle(struct safe_softc *sc)
1186 DPRINTF(("%s()\n", __FUNCTION__));
1188 WRITE_REG(sc, SAFE_RNG_CTRL,
1189 READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
/* Sets the SHORTEN bit: used during alarm recovery in safe_read_random(). */
1192 static __inline void
1193 safe_rng_enable_short_cycle(struct safe_softc *sc)
1195 DPRINTF(("%s()\n", __FUNCTION__));
1197 WRITE_REG(sc, SAFE_RNG_CTRL,
1198 READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
/* Polls the RNG status register until it reports ready (or the poll limit
 * is reached), then returns one word from the output register. */
1201 static __inline u_int32_t
1202 safe_rng_read(struct safe_softc *sc)
1207 while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
1209 return READ_REG(sc, SAFE_RNG_OUT);
/* OCF random-harvest callback: reads up to maxwords 32-bit words from the
 * RNG output register, then, if the comparator alarm count exceeds the
 * threshold, recalibrates the oscillators (short-cycle mode plus CNFG
 * frequency stepping) before resuming. */
1213 safe_read_random(void *arg, u_int32_t *buf, int maxwords)
1215 struct safe_softc *sc = (struct safe_softc *) arg;
1218 DPRINTF(("%s()\n", __FUNCTION__));
1222 * Fetch the next block of data.
1224 if (maxwords > safe_rngbufsize)
1225 maxwords = safe_rngbufsize;
1226 if (maxwords > SAFE_RNG_MAXBUFSIZ)
1227 maxwords = SAFE_RNG_MAXBUFSIZ;
1229 /* read as much as we can */
1230 for (rc = 0; rc < maxwords; rc++) {
1231 if (READ_REG(sc, SAFE_RNG_STAT) != 0)
1233 buf[rc] = READ_REG(sc, SAFE_RNG_OUT);
1238 * Check the comparator alarm count and reset the h/w if
1239 * it exceeds our threshold. This guards against the
1240 * hardware oscillators resonating with external signals.
1242 if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
1243 u_int32_t freq_inc, w;
1245 DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
1246 (unsigned)READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
1247 safestats.st_rngalarm++;
1248 safe_rng_enable_short_cycle(sc);
/* NOTE(review): the freq_inc initializer is elided in this sampled
 * listing (upstream initializes it before this loop) -- confirm
 * against the full source before assuming uninitialized use. */
1250 for (i = 0; i < 64; i++) {
1251 w = READ_REG(sc, SAFE_RNG_CNFG);
1252 freq_inc = ((w + freq_inc) & 0x3fL);
1253 w = ((w & ~0x3fL) | freq_inc);
1254 WRITE_REG(sc, SAFE_RNG_CNFG, w);
1256 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
1258 (void) safe_rng_read(sc);
/* If the alarms stopped, leave short-cycle mode and finish. */
1261 if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
1262 safe_rng_disable_short_cycle(sc);
1267 safe_rng_disable_short_cycle(sc);
1269 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
1273 #endif /* defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG) */
1277 * Resets the board. Values in the registers are left as is
1278 * from the reset (i.e. initial values are assigned elsewhere).
/* Pulses the PE/PDR/SG reset bits in PE_DMACFG: asserts all three, then
 * writes back the original value with the reset bits cleared. */
1281 safe_reset_board(struct safe_softc *sc)
1285 * Reset the device. The manual says no delay
1286 * is needed between marking and clearing reset.
1288 DPRINTF(("%s()\n", __FUNCTION__));
1290 v = READ_REG(sc, SAFE_PE_DMACFG) &~
1291 (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
1292 SAFE_PE_DMACFG_SGRESET);
1293 WRITE_REG(sc, SAFE_PE_DMACFG, v
1294 | SAFE_PE_DMACFG_PERESET
1295 | SAFE_PE_DMACFG_PDRRESET
1296 | SAFE_PE_DMACFG_SGRESET);
1297 WRITE_REG(sc, SAFE_PE_DMACFG, v);
1301 * Initialize registers we need to touch only once.
1304 safe_init_board(struct safe_softc *sc)
1306 u_int32_t v, dwords;
1308 DPRINTF(("%s()\n", __FUNCTION__));
1310 v = READ_REG(sc, SAFE_PE_DMACFG);
1311 v &=~ ( SAFE_PE_DMACFG_PEMODE
1312 | SAFE_PE_DMACFG_FSENA /* failsafe enable */
1313 | SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */
1314 | SAFE_PE_DMACFG_SPRPCI /* scatter ring on PCI */
1315 | SAFE_PE_DMACFG_ESDESC /* endian-swap descriptors */
1316 | SAFE_PE_DMACFG_ESPDESC /* endian-swap part. desc's */
1317 | SAFE_PE_DMACFG_ESSA /* endian-swap SA's */
1318 | SAFE_PE_DMACFG_ESPACKET /* swap the packet data */
1320 v |= SAFE_PE_DMACFG_FSENA /* failsafe enable */
1321 | SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */
1322 | SAFE_PE_DMACFG_SPRPCI /* scatter ring on PCI */
1323 | SAFE_PE_DMACFG_ESDESC /* endian-swap descriptors */
1324 | SAFE_PE_DMACFG_ESPDESC /* endian-swap part. desc's */
1325 | SAFE_PE_DMACFG_ESSA /* endian-swap SA's */
1327 | SAFE_PE_DMACFG_ESPACKET /* swap the packet data */
1330 WRITE_REG(sc, SAFE_PE_DMACFG, v);
1333 /* tell the safenet that we are 4321 and not 1234 */
1334 WRITE_REG(sc, SAFE_ENDIAN, 0xe4e41b1b);
1337 if (sc->sc_chiprev == SAFE_REV(1,0)) {
1339 * Avoid large PCI DMA transfers. Rev 1.0 has a bug where
1340 * "target mode transfers" done while the chip is DMA'ing
1341 * >1020 bytes cause the hardware to lockup. To avoid this
1342 * we reduce the max PCI transfer size and use small source
1343 * particle descriptors (<= 256 bytes).
1345 WRITE_REG(sc, SAFE_DMA_CFG, 256);
1346 device_printf(sc->sc_dev,
1347 "Reduce max DMA size to %u words for rev %u.%u WAR\n",
1348 (unsigned) ((READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff),
1349 (unsigned) SAFE_REV_MAJ(sc->sc_chiprev),
1350 (unsigned) SAFE_REV_MIN(sc->sc_chiprev));
1351 sc->sc_max_dsize = 256;
1353 sc->sc_max_dsize = SAFE_MAX_DSIZE;
1356 /* NB: operands+results are overlaid */
1357 WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
1358 WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
1360 * Configure ring entry size and number of items in the ring.
1362 KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
1363 ("PE ring entry not 32-bit aligned!"));
1364 dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
1365 WRITE_REG(sc, SAFE_PE_RINGCFG,
1366 (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
1367 WRITE_REG(sc, SAFE_PE_RINGPOLL, 0); /* disable polling */
1369 WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
1370 WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
1371 WRITE_REG(sc, SAFE_PE_PARTSIZE,
1372 (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
1374 * NB: destination particles are fixed size. We use
1375 * an mbuf cluster and require all results go to
1376 * clusters or smaller.
1378 WRITE_REG(sc, SAFE_PE_PARTCFG, sc->sc_max_dsize);
1380 /* it's now safe to enable PE mode, do it */
1381 WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);
1384 * Configure hardware to use level-triggered interrupts and
1385 * to interrupt after each descriptor is processed.
1387 WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
1388 WRITE_REG(sc, SAFE_HI_CLR, 0xffffffff);
1389 WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
1390 WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
1395 * Clean up after a chip crash.
1396 * It is assumed that the caller in splimp()
1399 safe_cleanchip(struct safe_softc *sc)
1401 DPRINTF(("%s()\n", __FUNCTION__));
1403 if (sc->sc_nqchip != 0) {
1404 struct safe_ringentry *re = sc->sc_back;
1406 while (re != sc->sc_front) {
1407 if (re->re_desc.d_csr != 0)
1408 safe_free_entry(sc, re);
1409 if (++re == sc->sc_ringtop)
1419 * It is assumed that the caller is within splimp().
1422 safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
1424 struct cryptop *crp;
1426 DPRINTF(("%s()\n", __FUNCTION__));
1431 if ((re->re_dst_skb != NULL) && (re->re_src_skb != re->re_dst_skb))
1433 m_freem(re->re_dst_m);
1435 printk("%s,%d: SKB not supported\n", __FILE__, __LINE__);
1438 crp = (struct cryptop *)re->re_crp;
1440 re->re_desc.d_csr = 0;
1442 crp->crp_etype = EFAULT;
1448 * Routine to reset the chip and clean up.
1449 * It is assumed that the caller is in splimp()
1452 safe_totalreset(struct safe_softc *sc)
1454 DPRINTF(("%s()\n", __FUNCTION__));
1456 safe_reset_board(sc);
1457 safe_init_board(sc);
1462 * Is the operand suitable aligned for direct DMA. Each
1463 * segment must be aligned on a 32-bit boundary and all
1464 * but the last segment must be a multiple of 4 bytes.
1467 safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op)
1471 DPRINTF(("%s()\n", __FUNCTION__));
1473 for (i = 0; i < op->nsegs; i++) {
1474 if (op->segs[i].ds_addr & 3)
1476 if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
1483 * Is the operand suitable for direct DMA as the destination
1484 * of an operation. The hardware requires that each ``particle''
1485 * but the last in an operation result have the same size. We
1486 * fix that size at SAFE_MAX_DSIZE bytes. This routine returns
1487 * 0 if some segment is not a multiple of of this size, 1 if all
1488 * segments are exactly this size, or 2 if segments are at worst
1489 * a multple of this size.
1492 safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op)
1496 DPRINTF(("%s()\n", __FUNCTION__));
1498 if (op->nsegs > 0) {
1501 for (i = 0; i < op->nsegs-1; i++) {
1502 if (op->segs[i].ds_len % sc->sc_max_dsize)
1504 if (op->segs[i].ds_len != sc->sc_max_dsize)
1512 safe_kprocess(device_t dev, struct cryptkop *krp, int hint)
1514 struct safe_softc *sc = device_get_softc(dev);
1516 unsigned long flags;
1518 DPRINTF(("%s()\n", __FUNCTION__));
1521 krp->krp_status = EINVAL;
1525 if (krp->krp_op != CRK_MOD_EXP) {
1526 krp->krp_status = EOPNOTSUPP;
1530 q = (struct safe_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
1532 krp->krp_status = ENOMEM;
1535 memset(q, 0, sizeof(*q));
1537 INIT_LIST_HEAD(&q->pkq_list);
1539 spin_lock_irqsave(&sc->sc_pkmtx, flags);
1540 list_add_tail(&q->pkq_list, &sc->sc_pkq);
1542 spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
1550 #define SAFE_CRK_PARAM_BASE 0
1551 #define SAFE_CRK_PARAM_EXP 1
1552 #define SAFE_CRK_PARAM_MOD 2
1555 safe_kstart(struct safe_softc *sc)
1557 struct cryptkop *krp = sc->sc_pkq_cur->pkq_krp;
1558 int exp_bits, mod_bits, base_bits;
1559 u_int32_t op, a_off, b_off, c_off, d_off;
1561 DPRINTF(("%s()\n", __FUNCTION__));
1563 if (krp->krp_iparams < 3 || krp->krp_oparams != 1) {
1564 krp->krp_status = EINVAL;
1568 base_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_BASE]);
1569 if (base_bits > 2048)
1571 if (base_bits <= 0) /* 5. base not zero */
1574 exp_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_EXP]);
1575 if (exp_bits > 2048)
1577 if (exp_bits <= 0) /* 1. exponent word length > 0 */
1578 goto too_small; /* 4. exponent not zero */
1580 mod_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_MOD]);
1581 if (mod_bits > 2048)
1583 if (mod_bits <= 32) /* 2. modulus word length > 1 */
1584 goto too_small; /* 8. MSW of modulus != zero */
1585 if (mod_bits < exp_bits) /* 3 modulus len >= exponent len */
1587 if ((krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p[0] & 1) == 0)
1588 goto bad_domain; /* 6. modulus is odd */
1589 if (mod_bits > krp->krp_param[krp->krp_iparams].crp_nbits)
1590 goto too_small; /* make sure result will fit */
1592 /* 7. modulus > base */
1593 if (mod_bits < base_bits)
1595 if (mod_bits == base_bits) {
1596 u_int8_t *basep, *modp;
1599 basep = krp->krp_param[SAFE_CRK_PARAM_BASE].crp_p +
1600 ((base_bits + 7) / 8) - 1;
1601 modp = krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p +
1602 ((mod_bits + 7) / 8) - 1;
1604 for (i = 0; i < (mod_bits + 7) / 8; i++, basep--, modp--) {
1612 /* And on the 9th step, he rested. */
1614 WRITE_REG(sc, SAFE_PK_A_LEN, (exp_bits + 31) / 32);
1615 WRITE_REG(sc, SAFE_PK_B_LEN, (mod_bits + 31) / 32);
1616 if (mod_bits > 1024) {
1617 op = SAFE_PK_FUNC_EXP4;
1623 op = SAFE_PK_FUNC_EXP16;
1629 sc->sc_pk_reslen = b_off - a_off;
1630 sc->sc_pk_resoff = d_off;
1632 /* A is exponent, B is modulus, C is base, D is result */
1633 safe_kload_reg(sc, a_off, b_off - a_off,
1634 &krp->krp_param[SAFE_CRK_PARAM_EXP]);
1635 WRITE_REG(sc, SAFE_PK_A_ADDR, a_off >> 2);
1636 safe_kload_reg(sc, b_off, b_off - a_off,
1637 &krp->krp_param[SAFE_CRK_PARAM_MOD]);
1638 WRITE_REG(sc, SAFE_PK_B_ADDR, b_off >> 2);
1639 safe_kload_reg(sc, c_off, b_off - a_off,
1640 &krp->krp_param[SAFE_CRK_PARAM_BASE]);
1641 WRITE_REG(sc, SAFE_PK_C_ADDR, c_off >> 2);
1642 WRITE_REG(sc, SAFE_PK_D_ADDR, d_off >> 2);
1644 WRITE_REG(sc, SAFE_PK_FUNC, op | SAFE_PK_FUNC_RUN);
1649 krp->krp_status = E2BIG;
1652 krp->krp_status = ERANGE;
1655 krp->krp_status = EDOM;
1660 safe_ksigbits(struct safe_softc *sc, struct crparam *cr)
1662 u_int plen = (cr->crp_nbits + 7) / 8;
1663 int i, sig = plen * 8;
1664 u_int8_t c, *p = cr->crp_p;
1666 DPRINTF(("%s()\n", __FUNCTION__));
1668 for (i = plen - 1; i >= 0; i--) {
1671 while ((c & 0x80) == 0) {
1683 safe_kfeed(struct safe_softc *sc)
1685 struct safe_pkq *q, *tmp;
1687 DPRINTF(("%s()\n", __FUNCTION__));
1689 if (list_empty(&sc->sc_pkq) && sc->sc_pkq_cur == NULL)
1691 if (sc->sc_pkq_cur != NULL)
1693 list_for_each_entry_safe(q, tmp, &sc->sc_pkq, pkq_list) {
1695 list_del(&q->pkq_list);
1696 if (safe_kstart(sc) != 0) {
1697 crypto_kdone(q->pkq_krp);
1699 sc->sc_pkq_cur = NULL;
1701 /* op started, start polling */
1702 mod_timer(&sc->sc_pkto, jiffies + 1);
1709 safe_kpoll(unsigned long arg)
1711 struct safe_softc *sc = NULL;
1713 struct crparam *res;
1716 unsigned long flags;
1718 DPRINTF(("%s()\n", __FUNCTION__));
1720 if (arg >= SAFE_MAX_CHIPS)
1722 sc = safe_chip_idx[arg];
1724 DPRINTF(("%s() - bad callback\n", __FUNCTION__));
1728 spin_lock_irqsave(&sc->sc_pkmtx, flags);
1729 if (sc->sc_pkq_cur == NULL)
1731 if (READ_REG(sc, SAFE_PK_FUNC) & SAFE_PK_FUNC_RUN) {
1732 /* still running, check back later */
1733 mod_timer(&sc->sc_pkto, jiffies + 1);
1738 res = &q->pkq_krp->krp_param[q->pkq_krp->krp_iparams];
1739 bzero(buf, sizeof(buf));
1740 bzero(res->crp_p, (res->crp_nbits + 7) / 8);
1741 for (i = 0; i < sc->sc_pk_reslen >> 2; i++)
1742 buf[i] = le32_to_cpu(READ_REG(sc, SAFE_PK_RAM_START +
1743 sc->sc_pk_resoff + (i << 2)));
1744 bcopy(buf, res->crp_p, (res->crp_nbits + 7) / 8);
1746 * reduce the bits that need copying if possible
1748 res->crp_nbits = min(res->crp_nbits,sc->sc_pk_reslen * 8);
1749 res->crp_nbits = safe_ksigbits(sc, res);
1751 for (i = SAFE_PK_RAM_START; i < SAFE_PK_RAM_END; i += 4)
1752 WRITE_REG(sc, i, 0);
1754 crypto_kdone(q->pkq_krp);
1756 sc->sc_pkq_cur = NULL;
1760 spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
1764 safe_kload_reg(struct safe_softc *sc, u_int32_t off, u_int32_t len,
1767 u_int32_t buf[64], i;
1769 DPRINTF(("%s()\n", __FUNCTION__));
1771 bzero(buf, sizeof(buf));
1772 bcopy(n->crp_p, buf, (n->crp_nbits + 7) / 8);
1774 for (i = 0; i < len >> 2; i++)
1775 WRITE_REG(sc, SAFE_PK_RAM_START + off + (i << 2),
1776 cpu_to_le32(buf[i]));
1781 safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
1783 printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
1785 , READ_REG(sc, SAFE_DMA_ENDIAN)
1786 , READ_REG(sc, SAFE_DMA_SRCADDR)
1787 , READ_REG(sc, SAFE_DMA_DSTADDR)
1788 , READ_REG(sc, SAFE_DMA_STAT)
1793 safe_dump_intrstate(struct safe_softc *sc, const char *tag)
1795 printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
1797 , READ_REG(sc, SAFE_HI_CFG)
1798 , READ_REG(sc, SAFE_HI_MASK)
1799 , READ_REG(sc, SAFE_HI_DESC_CNT)
1800 , READ_REG(sc, SAFE_HU_STAT)
1801 , READ_REG(sc, SAFE_HM_STAT)
1806 safe_dump_ringstate(struct safe_softc *sc, const char *tag)
1808 u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);
1810 /* NB: assume caller has lock on ring */
1811 printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
1813 estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
1814 (unsigned long)(sc->sc_back - sc->sc_ring),
1815 (unsigned long)(sc->sc_front - sc->sc_ring));
1819 safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
1823 ix = re - sc->sc_ring;
1824 printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
1833 if (re->re_src.nsegs > 1) {
1834 ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
1835 sizeof(struct safe_pdesc);
1836 for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
1837 printf(" spd[%u] %p: %p size %u flags %x"
1838 , ix, &sc->sc_spring[ix]
1839 , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
1840 , sc->sc_spring[ix].pd_size
1841 , sc->sc_spring[ix].pd_flags
1843 if (sc->sc_spring[ix].pd_size == 0)
1846 if (++ix == SAFE_TOTAL_SPART)
1850 if (re->re_dst.nsegs > 1) {
1851 ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
1852 sizeof(struct safe_pdesc);
1853 for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
1854 printf(" dpd[%u] %p: %p flags %x\n"
1855 , ix, &sc->sc_dpring[ix]
1856 , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
1857 , sc->sc_dpring[ix].pd_flags
1859 if (++ix == SAFE_TOTAL_DPART)
1863 printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
1864 re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
1865 printf("sa: key %x %x %x %x %x %x %x %x\n"
1866 , re->re_sa.sa_key[0]
1867 , re->re_sa.sa_key[1]
1868 , re->re_sa.sa_key[2]
1869 , re->re_sa.sa_key[3]
1870 , re->re_sa.sa_key[4]
1871 , re->re_sa.sa_key[5]
1872 , re->re_sa.sa_key[6]
1873 , re->re_sa.sa_key[7]
1875 printf("sa: indigest %x %x %x %x %x\n"
1876 , re->re_sa.sa_indigest[0]
1877 , re->re_sa.sa_indigest[1]
1878 , re->re_sa.sa_indigest[2]
1879 , re->re_sa.sa_indigest[3]
1880 , re->re_sa.sa_indigest[4]
1882 printf("sa: outdigest %x %x %x %x %x\n"
1883 , re->re_sa.sa_outdigest[0]
1884 , re->re_sa.sa_outdigest[1]
1885 , re->re_sa.sa_outdigest[2]
1886 , re->re_sa.sa_outdigest[3]
1887 , re->re_sa.sa_outdigest[4]
1889 printf("sr: iv %x %x %x %x\n"
1890 , re->re_sastate.sa_saved_iv[0]
1891 , re->re_sastate.sa_saved_iv[1]
1892 , re->re_sastate.sa_saved_iv[2]
1893 , re->re_sastate.sa_saved_iv[3]
1895 printf("sr: hashbc %u indigest %x %x %x %x %x\n"
1896 , re->re_sastate.sa_saved_hashbc
1897 , re->re_sastate.sa_saved_indigest[0]
1898 , re->re_sastate.sa_saved_indigest[1]
1899 , re->re_sastate.sa_saved_indigest[2]
1900 , re->re_sastate.sa_saved_indigest[3]
1901 , re->re_sastate.sa_saved_indigest[4]
1906 safe_dump_ring(struct safe_softc *sc, const char *tag)
1908 unsigned long flags;
1910 spin_lock_irqsave(&sc->sc_ringmtx, flags);
1911 printf("\nSafeNet Ring State:\n");
1912 safe_dump_intrstate(sc, tag);
1913 safe_dump_dmastatus(sc, tag);
1914 safe_dump_ringstate(sc, tag);
1915 if (sc->sc_nqchip) {
1916 struct safe_ringentry *re = sc->sc_back;
1918 safe_dump_request(sc, tag, re);
1919 if (++re == sc->sc_ringtop)
1921 } while (re != sc->sc_front);
1923 spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
1925 #endif /* SAFE_DEBUG */
1928 static int safe_probe(struct pci_dev *dev, const struct pci_device_id *ent)
1930 struct safe_softc *sc = NULL;
1931 u32 mem_start, mem_len, cmd;
1934 static int num_chips = 0;
1936 DPRINTF(("%s()\n", __FUNCTION__));
1938 if (pci_enable_device(dev) < 0)
1942 printk("safe: found device with no IRQ assigned. check BIOS settings!");
1943 pci_disable_device(dev);
1947 #ifdef HAVE_PCI_SET_MWI
1948 if (pci_set_mwi(dev)) {
1949 printk("safe: pci_set_mwi failed!");
1954 sc = (struct safe_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
1957 memset(sc, 0, sizeof(*sc));
1959 softc_device_init(sc, "safe", num_chips, safe_methods);
1963 sc->sc_pcidev = dev;
1964 if (num_chips < SAFE_MAX_CHIPS) {
1965 safe_chip_idx[device_get_unit(sc->sc_dev)] = sc;
1969 INIT_LIST_HEAD(&sc->sc_pkq);
1970 spin_lock_init(&sc->sc_pkmtx);
1972 pci_set_drvdata(sc->sc_pcidev, sc);
1974 /* we read its hardware registers as memory */
1975 mem_start = pci_resource_start(sc->sc_pcidev, 0);
1976 mem_len = pci_resource_len(sc->sc_pcidev, 0);
1978 sc->sc_base_addr = (ocf_iomem_t) ioremap(mem_start, mem_len);
1979 if (!sc->sc_base_addr) {
1980 device_printf(sc->sc_dev, "failed to ioremap 0x%x-0x%x\n",
1981 mem_start, mem_start + mem_len - 1);
1985 /* fix up the bus size */
1986 if (pci_set_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
1987 device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
1990 if (pci_set_consistent_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
1991 device_printf(sc->sc_dev, "No usable consistent DMA configuration, aborting.\n");
1995 pci_set_master(sc->sc_pcidev);
1997 pci_read_config_dword(sc->sc_pcidev, PCI_COMMAND, &cmd);
1999 if (!(cmd & PCI_COMMAND_MEMORY)) {
2000 device_printf(sc->sc_dev, "failed to enable memory mapping\n");
2004 if (!(cmd & PCI_COMMAND_MASTER)) {
2005 device_printf(sc->sc_dev, "failed to enable bus mastering\n");
2009 rc = request_irq(dev->irq, safe_intr, IRQF_SHARED, "safe", sc);
2011 device_printf(sc->sc_dev, "failed to hook irq %d\n", sc->sc_irq);
2014 sc->sc_irq = dev->irq;
2016 sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
2017 (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);
2020 * Allocate packet engine descriptors.
2022 sc->sc_ringalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
2023 SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
2024 &sc->sc_ringalloc.dma_paddr);
2025 if (!sc->sc_ringalloc.dma_vaddr) {
2026 device_printf(sc->sc_dev, "cannot allocate PE descriptor ring\n");
2031 * Hookup the static portion of all our data structures.
2033 sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
2034 sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
2035 sc->sc_front = sc->sc_ring;
2036 sc->sc_back = sc->sc_ring;
2037 raddr = sc->sc_ringalloc.dma_paddr;
2038 bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
2039 for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
2040 struct safe_ringentry *re = &sc->sc_ring[i];
2042 re->re_desc.d_sa = raddr +
2043 offsetof(struct safe_ringentry, re_sa);
2044 re->re_sa.sa_staterec = raddr +
2045 offsetof(struct safe_ringentry, re_sastate);
2047 raddr += sizeof (struct safe_ringentry);
2049 spin_lock_init(&sc->sc_ringmtx);
2052 * Allocate scatter and gather particle descriptors.
2054 sc->sc_spalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
2055 SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
2056 &sc->sc_spalloc.dma_paddr);
2057 if (!sc->sc_spalloc.dma_vaddr) {
2058 device_printf(sc->sc_dev, "cannot allocate source particle descriptor ring\n");
2061 sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
2062 sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
2063 sc->sc_spfree = sc->sc_spring;
2064 bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));
2066 sc->sc_dpalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
2067 SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
2068 &sc->sc_dpalloc.dma_paddr);
2069 if (!sc->sc_dpalloc.dma_vaddr) {
2070 device_printf(sc->sc_dev, "cannot allocate destination particle descriptor ring\n");
2073 sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
2074 sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
2075 sc->sc_dpfree = sc->sc_dpring;
2076 bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));
2078 sc->sc_cid = crypto_get_driverid(softc_get_device(sc), CRYPTOCAP_F_HARDWARE);
2079 if (sc->sc_cid < 0) {
2080 device_printf(sc->sc_dev, "could not get crypto driver id\n");
2084 printf("%s:", device_get_nameunit(sc->sc_dev));
2086 devinfo = READ_REG(sc, SAFE_DEVINFO);
2087 if (devinfo & SAFE_DEVINFO_RNG) {
2088 sc->sc_flags |= SAFE_FLAGS_RNG;
2091 if (devinfo & SAFE_DEVINFO_PKEY) {
2093 sc->sc_flags |= SAFE_FLAGS_KEY;
2094 crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
2096 crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
2098 init_timer(&sc->sc_pkto);
2099 sc->sc_pkto.function = safe_kpoll;
2100 sc->sc_pkto.data = (unsigned long) device_get_unit(sc->sc_dev);
2102 if (devinfo & SAFE_DEVINFO_DES) {
2103 printf(" des/3des");
2104 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
2105 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
2107 if (devinfo & SAFE_DEVINFO_AES) {
2109 crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
2111 if (devinfo & SAFE_DEVINFO_MD5) {
2113 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
2115 if (devinfo & SAFE_DEVINFO_SHA1) {
2117 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
2120 crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0);
2121 crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0);
2122 /* XXX other supported algorithms */
2125 safe_reset_board(sc); /* reset h/w */
2126 safe_init_board(sc); /* init h/w */
2128 #if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
2129 if (sc->sc_flags & SAFE_FLAGS_RNG) {
2131 crypto_rregister(sc->sc_cid, safe_read_random, sc);
2133 #endif /* SAFE_NO_RNG */
2138 if (sc->sc_cid >= 0)
2139 crypto_unregister_all(sc->sc_cid);
2140 if (sc->sc_irq != -1)
2141 free_irq(sc->sc_irq, sc);
2142 if (sc->sc_ringalloc.dma_vaddr)
2143 pci_free_consistent(sc->sc_pcidev,
2144 SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
2145 sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
2146 if (sc->sc_spalloc.dma_vaddr)
2147 pci_free_consistent(sc->sc_pcidev,
2148 SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
2149 sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
2150 if (sc->sc_dpalloc.dma_vaddr)
2151 pci_free_consistent(sc->sc_pcidev,
2152 SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
2153 sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
2158 static void safe_remove(struct pci_dev *dev)
2160 struct safe_softc *sc = pci_get_drvdata(dev);
2162 DPRINTF(("%s()\n", __FUNCTION__));
2164 /* XXX wait/abort active ops */
2166 WRITE_REG(sc, SAFE_HI_MASK, 0); /* disable interrupts */
2168 del_timer_sync(&sc->sc_pkto);
2170 crypto_unregister_all(sc->sc_cid);
2174 if (sc->sc_irq != -1)
2175 free_irq(sc->sc_irq, sc);
2176 if (sc->sc_ringalloc.dma_vaddr)
2177 pci_free_consistent(sc->sc_pcidev,
2178 SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
2179 sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
2180 if (sc->sc_spalloc.dma_vaddr)
2181 pci_free_consistent(sc->sc_pcidev,
2182 SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
2183 sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
2184 if (sc->sc_dpalloc.dma_vaddr)
2185 pci_free_consistent(sc->sc_pcidev,
2186 SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
2187 sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
2189 sc->sc_ringalloc.dma_vaddr = NULL;
2190 sc->sc_spalloc.dma_vaddr = NULL;
2191 sc->sc_dpalloc.dma_vaddr = NULL;
2194 static struct pci_device_id safe_pci_tbl[] = {
2195 { PCI_VENDOR_SAFENET, PCI_PRODUCT_SAFEXCEL,
2196 PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
2199 MODULE_DEVICE_TABLE(pci, safe_pci_tbl);
2201 static struct pci_driver safe_driver = {
2203 .id_table = safe_pci_tbl,
2204 .probe = safe_probe,
2205 .remove = safe_remove,
2206 /* add PM stuff here one day */
2209 static int __init safe_init (void)
2211 struct safe_softc *sc = NULL;
2214 DPRINTF(("%s(%p)\n", __FUNCTION__, safe_init));
2216 rc = pci_register_driver(&safe_driver);
2217 pci_register_driver_compat(&safe_driver, rc);
2222 static void __exit safe_exit (void)
2224 pci_unregister_driver(&safe_driver);
2227 module_init(safe_init);
2228 module_exit(safe_exit);
2230 MODULE_LICENSE("BSD");
2231 MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
2232 MODULE_DESCRIPTION("OCF driver for safenet PCI crypto devices");