2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 #include <linux/module.h>
49 #include <crypto/internal/rsa.h>
50 #include <crypto/internal/akcipher.h>
51 #include <crypto/akcipher.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/fips.h>
54 #include "qat_rsakey-asn1.h"
55 #include "icp_qat_fw_pke.h"
56 #include "adf_accel_devices.h"
57 #include "adf_transport.h"
58 #include "adf_common_drv.h"
59 #include "qat_crypto.h"
/*
 * Input-parameter list handed to QAT firmware for an RSA PKE request.
 * NOTE(review): the interior members (likely a union of enc {m, e, n}
 * and dec {c, d, n} dma_addr_t fields, per usage below) are missing
 * from this chunk.  Layout is firmware ABI — hence __packed and the
 * 64-byte alignment; do not reorder or pad.
 */
61 struct qat_rsa_input_params {
75 } __packed __aligned(64);
/*
 * Output-parameter list for an RSA PKE request (result buffer DMA
 * addresses).  NOTE(review): interior members are missing from this
 * chunk; usage below shows enc.c / dec.m and an out_tab[] member.
 * Firmware ABI layout — __packed, 64-byte aligned.
 */
77 struct qat_rsa_output_params {
87 } __packed __aligned(64);
/*
 * NOTE(review): tail of struct qat_rsa_ctx — the struct header and the
 * key members (n/e/d buffers, dma_n/dma_e/dma_d handles, key_sz, all
 * referenced below) are not visible in this chunk.
 */
	/* Crypto instance (device + rings) this transform is bound to. */
97 struct qat_crypto_instance *inst;
98 } __packed __aligned(64);
/*
 * Per-request driver state, carved out of the akcipher request context
 * (see rsa.reqsize below: struct size + 64 so it can be 64-byte
 * PTR_ALIGNed).  NOTE(review): members phy_in, phy_out and src_align,
 * referenced by the code below, are declared on lines missing from
 * this chunk.
 */
100 struct qat_rsa_request {
101 struct qat_rsa_input_params in;
102 struct qat_rsa_output_params out;
106 struct icp_qat_fw_pke_request req;
107 struct qat_rsa_ctx *ctx;
/*
 * Firmware completion callback for one RSA request.
 * Recovers the akcipher_request from the opaque cookie, translates the
 * firmware status into 0/-EINVAL, releases every DMA resource set up
 * by qat_rsa_enc()/qat_rsa_dec(), strips leading zero bytes from the
 * fixed-size (key_sz) result, and completes the request.
 * NOTE(review): several lines (braces, unmap direction arguments, the
 * zero-skip loop body) are missing from this chunk.
 */
111 static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
113 struct akcipher_request *areq = (void *)(__force long)resp->opaque;
/* Request ctx was 64-byte aligned at submit time; realign the same way. */
114 struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64);
115 struct device *dev = &GET_DEV(req->ctx->inst->accel_dev);
116 int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
117 resp->pke_resp_hdr.comn_resp_flags);
118 char *ptr = areq->dst;
120 err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
/* Free the bounce buffer (if one was allocated), then unmap all DMA. */
123 dma_free_coherent(dev, req->ctx->key_sz, req->src_align,
126 dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz,
129 dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz,
131 dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
133 dma_unmap_single(dev, req->phy_out,
134 sizeof(struct qat_rsa_output_params),
/* HW always produces key_sz bytes; trim leading zeros for the caller. */
137 areq->dst_len = req->ctx->key_sz;
138 /* Need to set the correct length of the output */
139 while (!(*ptr) && areq->dst_len) {
/* Only move the data when zeros were actually stripped. */
144 if (areq->dst_len != req->ctx->key_sz)
145 memcpy(areq->dst, ptr, areq->dst_len);
147 akcipher_request_complete(areq, err);
/*
 * Transport-layer entry point for asymmetric (PKE) responses; casts the
 * raw message and dispatches to qat_rsa_cb().  NOTE(review): the
 * dispatch line itself is missing from this chunk.
 */
150 void qat_alg_asym_callback(void *_resp)
152 struct icp_qat_fw_pke_resp *resp = _resp;
/* Firmware function IDs for the RSA public-key (encrypt) op, per key size. */
157 #define PKE_RSA_EP_512 0x1c161b21
158 #define PKE_RSA_EP_1024 0x35111bf7
159 #define PKE_RSA_EP_1536 0x4d111cdc
160 #define PKE_RSA_EP_2048 0x6e111dba
161 #define PKE_RSA_EP_3072 0x7d111ea3
162 #define PKE_RSA_EP_4096 0xa5101f7e
/*
 * Map a modulus length in bytes to the firmware encrypt function id;
 * returns 0 for unsupported sizes (callers treat 0 as "invalid key").
 * NOTE(review): the size-selection branches are missing from this
 * chunk; only the return statements are visible.
 */
164 static unsigned long qat_rsa_enc_fn_id(unsigned int len)
166 unsigned int bitslen = len << 3;
170 return PKE_RSA_EP_512;
172 return PKE_RSA_EP_1024;
174 return PKE_RSA_EP_1536;
176 return PKE_RSA_EP_2048;
178 return PKE_RSA_EP_3072;
180 return PKE_RSA_EP_4096;
/* Firmware function IDs for the RSA private-key (decrypt, form 1) op. */
186 #define PKE_RSA_DP1_512 0x1c161b3c
187 #define PKE_RSA_DP1_1024 0x35111c12
188 #define PKE_RSA_DP1_1536 0x4d111cf7
189 #define PKE_RSA_DP1_2048 0x6e111dda
190 #define PKE_RSA_DP1_3072 0x7d111ebe
191 #define PKE_RSA_DP1_4096 0xa5101f98
/*
 * Map a modulus length in bytes to the firmware decrypt function id;
 * returns 0 for unsupported sizes.  NOTE(review): size-selection
 * branches are missing from this chunk.
 */
193 static unsigned long qat_rsa_dec_fn_id(unsigned int len)
195 unsigned int bitslen = len << 3;
199 return PKE_RSA_DP1_512;
201 return PKE_RSA_DP1_1024;
203 return PKE_RSA_DP1_1536;
205 return PKE_RSA_DP1_2048;
207 return PKE_RSA_DP1_3072;
209 return PKE_RSA_DP1_4096;
/*
 * akcipher .encrypt handler: builds an RSA public-key firmware request
 * (m^e mod n), DMA-maps input/output and parameter tables, and submits
 * it to the PKE ring (retrying on -EBUSY); completion is handled
 * asynchronously in qat_rsa_cb().
 * NOTE(review): error-path labels, direction arguments and several
 * closing braces are missing from this chunk; the visible cleanup
 * tail appears to be the shared unwind path.
 */
215 static int qat_rsa_enc(struct akcipher_request *req)
217 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
218 struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
219 struct qat_crypto_instance *inst = ctx->inst;
220 struct device *dev = &GET_DEV(inst->accel_dev);
221 struct qat_rsa_request *qat_req =
222 PTR_ALIGN(akcipher_request_ctx(req), 64);
223 struct icp_qat_fw_pke_request *msg = &qat_req->req;
/* A public key (n, e) must have been set via setkey first. */
226 if (unlikely(!ctx->n || !ctx->e))
/* Output must hold a full modulus-sized result; report needed size. */
229 if (req->dst_len < ctx->key_sz) {
230 req->dst_len = ctx->key_sz;
233 memset(msg, '\0', sizeof(*msg));
234 ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
235 ICP_QAT_FW_COMN_REQ_FLAG_SET);
/* func_id == 0 means the key size is not supported by firmware. */
236 msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
237 if (unlikely(!msg->pke_hdr.cd_pars.func_id))
241 msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
242 msg->pke_hdr.comn_req_flags =
243 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
244 QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
/* e and n were placed in coherent DMA memory at setkey time. */
246 qat_req->in.enc.e = ctx->dma_e;
247 qat_req->in.enc.n = ctx->dma_n;
/*
251 * src can be of any size in valid range, but HW expects it to be the
252 * same as modulo n so in case it is different we need to allocate a
253 * new buf and copy src data.
254 * In other case we just need to map the user provided buffer.
*/
256 if (req->src_len < ctx->key_sz) {
257 int shift = ctx->key_sz - req->src_len;
259 qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
262 if (unlikely(!qat_req->src_align))
/* Right-align the message inside the zeroed key_sz bounce buffer. */
265 memcpy(qat_req->src_align + shift, req->src, req->src_len);
267 qat_req->src_align = NULL;
268 qat_req->in.enc.m = dma_map_single(dev, req->src, req->src_len,
/* Terminate the firmware parameter tables. */
271 qat_req->in.in_tab[3] = 0;
272 qat_req->out.enc.c = dma_map_single(dev, req->dst, req->dst_len,
274 qat_req->out.out_tab[1] = 0;
/* Map the parameter tables themselves so firmware can fetch them. */
275 qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m,
276 sizeof(struct qat_rsa_input_params),
278 qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c,
279 sizeof(struct qat_rsa_output_params),
/* in.enc.m only needs checking when the user buffer was mapped directly. */
282 if (unlikely((!qat_req->src_align &&
283 dma_mapping_error(dev, qat_req->in.enc.m)) ||
284 dma_mapping_error(dev, qat_req->out.enc.c) ||
285 dma_mapping_error(dev, qat_req->phy_in) ||
286 dma_mapping_error(dev, qat_req->phy_out)))
289 msg->pke_mid.src_data_addr = qat_req->phy_in;
290 msg->pke_mid.dest_data_addr = qat_req->phy_out;
/* Opaque cookie lets qat_rsa_cb() recover the akcipher request. */
291 msg->pke_mid.opaque = (uint64_t)(__force long)req;
292 msg->input_param_count = 3;
293 msg->output_param_count = 1;
/* Submit to the PKE ring; bounded retry while the ring is full. */
295 ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
296 } while (ret == -EBUSY && ctr++ < 100);
/* Error unwind: undo exactly what was set up above, in reverse. */
301 if (qat_req->src_align)
302 dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
305 if (!dma_mapping_error(dev, qat_req->in.enc.m))
306 dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
308 if (!dma_mapping_error(dev, qat_req->out.enc.c))
309 dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
311 if (!dma_mapping_error(dev, qat_req->phy_in))
312 dma_unmap_single(dev, qat_req->phy_in,
313 sizeof(struct qat_rsa_input_params),
315 if (!dma_mapping_error(dev, qat_req->phy_out))
316 dma_unmap_single(dev, qat_req->phy_out,
317 sizeof(struct qat_rsa_output_params),
/*
 * akcipher .decrypt handler: mirror of qat_rsa_enc() for the private
 * key op (c^d mod n) — builds the firmware request, DMA-maps buffers
 * and parameter tables, submits to the PKE ring with bounded -EBUSY
 * retry; completion runs in qat_rsa_cb().
 * NOTE(review): error-path labels, direction arguments and closing
 * braces are missing from this chunk.
 */
322 static int qat_rsa_dec(struct akcipher_request *req)
324 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
325 struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
326 struct qat_crypto_instance *inst = ctx->inst;
327 struct device *dev = &GET_DEV(inst->accel_dev);
328 struct qat_rsa_request *qat_req =
329 PTR_ALIGN(akcipher_request_ctx(req), 64);
330 struct icp_qat_fw_pke_request *msg = &qat_req->req;
/* A private key (n, d) must have been set via setkey first. */
333 if (unlikely(!ctx->n || !ctx->d))
/* Output must hold a full modulus-sized result; report needed size. */
336 if (req->dst_len < ctx->key_sz) {
337 req->dst_len = ctx->key_sz;
340 memset(msg, '\0', sizeof(*msg));
341 ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
342 ICP_QAT_FW_COMN_REQ_FLAG_SET);
/* func_id == 0 means the key size is not supported by firmware. */
343 msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz);
344 if (unlikely(!msg->pke_hdr.cd_pars.func_id))
348 msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
349 msg->pke_hdr.comn_req_flags =
350 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
351 QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
/* d and n were placed in coherent DMA memory at setkey time. */
353 qat_req->in.dec.d = ctx->dma_d;
354 qat_req->in.dec.n = ctx->dma_n;
/*
358 * src can be of any size in valid range, but HW expects it to be the
359 * same as modulo n so in case it is different we need to allocate a
360 * new buf and copy src data.
361 * In other case we just need to map the user provided buffer.
*/
363 if (req->src_len < ctx->key_sz) {
364 int shift = ctx->key_sz - req->src_len;
366 qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
369 if (unlikely(!qat_req->src_align))
/* Right-align the ciphertext inside the zeroed bounce buffer. */
372 memcpy(qat_req->src_align + shift, req->src, req->src_len);
374 qat_req->src_align = NULL;
375 qat_req->in.dec.c = dma_map_single(dev, req->src, req->src_len,
/* Terminate the firmware parameter tables. */
378 qat_req->in.in_tab[3] = 0;
379 qat_req->out.dec.m = dma_map_single(dev, req->dst, req->dst_len,
381 qat_req->out.out_tab[1] = 0;
/* Map the parameter tables themselves so firmware can fetch them. */
382 qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c,
383 sizeof(struct qat_rsa_input_params),
385 qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m,
386 sizeof(struct qat_rsa_output_params),
/* in.dec.c only needs checking when the user buffer was mapped directly. */
389 if (unlikely((!qat_req->src_align &&
390 dma_mapping_error(dev, qat_req->in.dec.c)) ||
391 dma_mapping_error(dev, qat_req->out.dec.m) ||
392 dma_mapping_error(dev, qat_req->phy_in) ||
393 dma_mapping_error(dev, qat_req->phy_out)))
396 msg->pke_mid.src_data_addr = qat_req->phy_in;
397 msg->pke_mid.dest_data_addr = qat_req->phy_out;
/* Opaque cookie lets qat_rsa_cb() recover the akcipher request. */
398 msg->pke_mid.opaque = (uint64_t)(__force long)req;
399 msg->input_param_count = 3;
400 msg->output_param_count = 1;
/* Submit to the PKE ring; bounded retry while the ring is full. */
402 ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
403 } while (ret == -EBUSY && ctr++ < 100);
/* Error unwind: undo exactly what was set up above, in reverse. */
408 if (qat_req->src_align)
409 dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
412 if (!dma_mapping_error(dev, qat_req->in.dec.c))
413 dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
415 if (!dma_mapping_error(dev, qat_req->out.dec.m))
416 dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
418 if (!dma_mapping_error(dev, qat_req->phy_in))
419 dma_unmap_single(dev, qat_req->phy_in,
420 sizeof(struct qat_rsa_input_params),
422 if (!dma_mapping_error(dev, qat_req->phy_out))
423 dma_unmap_single(dev, qat_req->phy_out,
424 sizeof(struct qat_rsa_output_params),
/*
 * ASN.1 BER decoder action for the RSA modulus n (see
 * qat_rsakey-asn1.h): strips leading zeros, validates the resulting
 * key size, allocates a coherent DMA buffer and copies n into it.
 * context is the qat_rsa_ctx; returns 0 or a negative errno.
 * NOTE(review): the zero-skip loop body, key_sz assignment and error
 * returns are on lines missing from this chunk.
 */
429 int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
430 const void *value, size_t vlen)
432 struct qat_rsa_ctx *ctx = context;
433 struct qat_crypto_instance *inst = ctx->inst;
434 struct device *dev = &GET_DEV(inst->accel_dev);
435 const char *ptr = value;
/* BER integers may carry leading zero octets; skip them. */
438 while (!*ptr && vlen) {
445 /* In FIPS mode only allow key size 2K & 3K */
446 if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) {
447 pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
450 /* invalid key size provided */
451 if (!qat_rsa_enc_fn_id(ctx->key_sz))
455 ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
459 memcpy(ctx->n, ptr, ctx->key_sz);
/*
 * ASN.1 BER decoder action for the RSA public exponent e: strips
 * leading zeros, validates the length against the already-parsed
 * key size, then stores e right-aligned in a zeroed key_sz-byte
 * coherent DMA buffer.  NOTE(review): loop body and error returns are
 * on lines missing from this chunk.
 */
467 int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
468 const void *value, size_t vlen)
470 struct qat_rsa_ctx *ctx = context;
471 struct qat_crypto_instance *inst = ctx->inst;
472 struct device *dev = &GET_DEV(inst->accel_dev);
473 const char *ptr = value;
/* Skip leading zero octets of the BER integer. */
475 while (!*ptr && vlen) {
/* e must fit in the modulus size; n must have been parsed first. */
480 if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
485 ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
/* Right-align e; the leading bytes stay zero from dma_zalloc_coherent. */
490 memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
/*
 * ASN.1 BER decoder action for the RSA private exponent d: strips
 * leading zeros, enforces length and FIPS key-size constraints, then
 * stores d right-aligned in a zeroed key_sz-byte coherent DMA buffer.
 * NOTE(review): loop body and error returns are on lines missing from
 * this chunk.
 */
494 int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
495 const void *value, size_t vlen)
497 struct qat_rsa_ctx *ctx = context;
498 struct qat_crypto_instance *inst = ctx->inst;
499 struct device *dev = &GET_DEV(inst->accel_dev);
500 const char *ptr = value;
/* Skip leading zero octets of the BER integer. */
503 while (!*ptr && vlen) {
/* d must fit in the modulus size; n must have been parsed first. */
509 if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
512 /* In FIPS mode only allow key size 2K & 3K */
513 if (fips_enabled && (vlen != 256 && vlen != 384)) {
514 pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
519 ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
/* Right-align d; leading bytes stay zero from dma_zalloc_coherent. */
523 memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
/*
 * akcipher .setkey handler: frees any previously installed key
 * material (scrubbing d before freeing, since it is secret), then
 * BER-decodes the new key, which invokes the qat_rsa_get_{n,e,d}
 * actions above.  The visible tail frees partially-installed material
 * on decode failure.  NOTE(review): null-pointer guards around the
 * frees, pointer resets and the return statements are on lines
 * missing from this chunk.
 */
530 static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
533 struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
534 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
537 /* Free the old key if any */
539 dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
541 dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
/* Scrub the private exponent before releasing its buffer.
 * NOTE(review): plain memset of a secret may be optimized away in
 * general; here the buffer is immediately freed via a real call, but
 * memzero_explicit() would be the defensive choice — confirm upstream. */
543 memset(ctx->d, '\0', ctx->key_sz);
544 dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
/* Decode the BER-encoded key; actions fill ctx->n/e/d. */
550 ret = asn1_ber_decoder(&qat_rsakey_decoder, ctx, key, keylen);
/* A usable key needs at least the public half (n, e). */
554 if (!ctx->n || !ctx->e) {
555 /* invalid key provided */
/* Error unwind: scrub and free whatever the decoder did install. */
563 memset(ctx->d, '\0', ctx->key_sz);
564 dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
568 dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
572 dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
/*
 * akcipher .init handler: binds the transform to a QAT crypto instance
 * on the current NUMA node.  NOTE(review): the failure check and the
 * ctx->inst assignment are on lines missing from this chunk.
 */
579 static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
581 struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
582 struct qat_crypto_instance *inst =
583 qat_crypto_get_instance_node(get_current_node());
/*
 * akcipher .exit handler: releases key DMA buffers (scrubbing the
 * private exponent first) and drops the reference on the crypto
 * instance taken in qat_rsa_init_tfm().  NOTE(review): null guards
 * around the frees are on lines missing from this chunk.
 */
593 static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
595 struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
596 struct device *dev = &GET_DEV(ctx->inst->accel_dev);
599 dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
601 dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
/* Scrub the secret d before freeing its buffer. */
603 memset(ctx->d, '\0', ctx->key_sz);
604 dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
606 qat_crypto_put_instance(ctx->inst);
/*
 * akcipher algorithm descriptor registered with the crypto API.
 * .verify reuses qat_rsa_enc because raw RSA verify is the public-key
 * op; reqsize reserves sizeof(qat_rsa_request) + 64 so the per-request
 * state can be 64-byte aligned with PTR_ALIGN.  High cra_priority
 * (1000) prefers this HW driver over software RSA.
 * NOTE(review): .sign and cra_name lines are missing from this chunk.
 */
612 static struct akcipher_alg rsa = {
613 .encrypt = qat_rsa_enc,
614 .decrypt = qat_rsa_dec,
616 .verify = qat_rsa_enc,
617 .setkey = qat_rsa_setkey,
618 .init = qat_rsa_init_tfm,
619 .exit = qat_rsa_exit_tfm,
620 .reqsize = sizeof(struct qat_rsa_request) + 64,
623 .cra_driver_name = "qat-rsa",
624 .cra_priority = 1000,
625 .cra_module = THIS_MODULE,
626 .cra_ctxsize = sizeof(struct qat_rsa_ctx),
/*
 * Register the RSA akcipher with the crypto API; called from driver
 * init.  cra_flags is reset to 0 in case of a previous
 * register/unregister cycle.
 */
630 int qat_asym_algs_register(void)
632 rsa.base.cra_flags = 0;
633 return crypto_register_akcipher(&rsa);
/* Unregister the RSA akcipher; called from driver teardown. */
636 void qat_asym_algs_unregister(void)
638 crypto_unregister_akcipher(&rsa);