/*
 * SHA-256 routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/module.h>
#include <asm/vio.h>
#include <asm/byteorder.h>

#include "nx_csbcpb.h"
#include "nx.h"
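
/*
 * Initialize a hash request: reset the software sha256_state, point the
 * NX context at the SHA-256 coprocessor properties, and pre-build the
 * output scatter/gather list that the hardware writes the digest into.
 */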
static int nx_sha256_init(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_sg *out_sg;
	int len;
	u32 max_sg_len;

	nx_ctx_init(nx_ctx, HCOP_FC_SHA);
	memset(sctx, 0, sizeof *sctx);

	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];

	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
	len = SHA256_DIGEST_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  &len, max_sg_len);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (len != SHA256_DIGEST_SIZE)
		return -EINVAL;
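
	/*
	 * Seed the state with the standard SHA-256 initial hash values.
	 * They are stored byte-swapped so that sctx->state has the same
	 * layout as the big-endian message_digest field the coprocessor
	 * reads and writes (update/final memcpy between the two directly).
	 */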
	sctx->state[0] = __cpu_to_be32(SHA256_H0);
	sctx->state[1] = __cpu_to_be32(SHA256_H1);
	sctx->state[2] = __cpu_to_be32(SHA256_H2);
	sctx->state[3] = __cpu_to_be32(SHA256_H3);
	sctx->state[4] = __cpu_to_be32(SHA256_H4);
	sctx->state[5] = __cpu_to_be32(SHA256_H5);
	sctx->state[6] = __cpu_to_be32(SHA256_H6);
	sctx->state[7] = __cpu_to_be32(SHA256_H7);

	return 0;
}
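
/*
 * Feed more message bytes to the hash.  Anything short of a full
 * SHA256_BLOCK_SIZE block is buffered in sctx->buf; once at least one
 * full block is available, the block-aligned portion is handed to the
 * NX coprocessor and only the sub-block tail is carried over.
 */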
static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg;
	u64 to_process = 0, leftover, total;
	unsigned long irq_flags;
	int rc = 0;
	int data_len;
	u32 max_sg_len;
	u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
	/* 2 cases for total data len:
	 *  1: < SHA256_BLOCK_SIZE: copy into state, return 0
	 *  2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	total = (sctx->count % SHA256_BLOCK_SIZE) + len;
	if (total < SHA256_BLOCK_SIZE) {
		memcpy(sctx->buf + buf_len, data, len);
		sctx->count += len;
		goto out;
	}

	memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
	in_sg = nx_ctx->in_sg;
	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
	do {
		/*
		 * to_process: the SHA256_BLOCK_SIZE data chunk to process in
		 * this update. This value is also restricted by the sg list
		 * limits.
		 */
		to_process = total - to_process;
		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
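
		/*
		 * For example (illustrative numbers only): with a 64-byte
		 * block size, total = 200 gives to_process = 200 & ~63 = 192,
		 * i.e. three full blocks, leaving an 8-byte tail to buffer.
		 */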
		if (buf_len) {
			data_len = buf_len;
			in_sg = nx_build_sg_list(nx_ctx->in_sg,
						 (u8 *) sctx->buf,
						 &data_len,
						 max_sg_len);

			if (data_len != buf_len) {
				rc = -EINVAL;
				goto out;
			}
		}

		data_len = to_process - buf_len;
		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
					 &data_len, max_sg_len);

		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);

		to_process = (data_len + buf_len);
		leftover = total - to_process;

		/*
		 * we've hit the nx chip previously and we're updating
		 * again, so copy over the partial digest.
		 */
		memcpy(csbcpb->cpb.sha256.input_partial_digest,
			       csbcpb->cpb.sha256.message_digest,
			       SHA256_DIGEST_SIZE);

		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->sha256_ops));

		total -= to_process;
		data += to_process - buf_len;
		buf_len = 0;

	} while (leftover >= SHA256_BLOCK_SIZE);
	/* copy the leftover back into the state struct */
	if (leftover)
		memcpy(sctx->buf, data, leftover);

	sctx->count += len;
	memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
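
/*
 * Finish the hash: hand the buffered sub-block tail to the coprocessor
 * with the intermediate flag cleared, so the hardware applies the final
 * SHA-256 padding based on message_bit_length, then copy the completed
 * digest out to the caller.
 */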
static int nx_sha256_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	unsigned long irq_flags;
	u32 max_sg_len;
	int rc = 0;
	int len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* final is represented by continuing the operation and indicating that
	 * this is not an intermediate operation */
	if (sctx->count >= SHA256_BLOCK_SIZE) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	}
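
	/*
	 * SHA-256 encodes the total message length in bits in its final
	 * padding block; sctx->count is in bytes, hence the multiply by 8.
	 */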
	csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);

	len = sctx->count & (SHA256_BLOCK_SIZE - 1);
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
				 &len, max_sg_len);

	if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
		rc = -EINVAL;
		goto out;
	}

	len = SHA256_DIGEST_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);

	if (len != SHA256_DIGEST_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->sha256_ops));

	atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes));
	memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
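
/*
 * export/import copy the raw sha256_state, which is sufficient to
 * suspend and resume a partially-hashed request: every piece of
 * intermediate state (state words, byte count, buffered tail) lives in
 * the descriptor context rather than in the hardware.
 */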
static int nx_sha256_export(struct shash_desc *desc, void *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}
static int nx_sha256_import(struct shash_desc *desc, const void *in)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}
struct shash_alg nx_shash_sha256_alg = {
	.digestsize = SHA256_DIGEST_SIZE,
	.init       = nx_sha256_init,
	.update     = nx_sha256_update,
	.final      = nx_sha256_final,
	.export     = nx_sha256_export,
	.import     = nx_sha256_import,
	.descsize   = sizeof(struct sha256_state),
	.statesize  = sizeof(struct sha256_state),
	.base       = {
		.cra_name        = "sha256",
		.cra_driver_name = "sha256-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize   = SHA256_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_sha_init,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};
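
/*
 * Usage sketch (illustrative only, not part of this driver): a kernel
 * caller reaches this implementation through the generic shash API.
 * The helpers below are standard crypto-API calls; error handling is
 * omitted for brevity.
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
 *	SHASH_DESC_ON_STACK(desc, tfm);
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	desc->tfm = tfm;
 *	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *	crypto_shash_init(desc);
 *	crypto_shash_update(desc, data, data_len);
 *	crypto_shash_final(desc, digest);
 *	crypto_free_shash(tfm);
 *
 * The crypto core prefers "sha256-nx" over the generic software sha256
 * when this algorithm's cra_priority is higher.
 */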