// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/internal/kpp.h>
#include <crypto/dh.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <crypto/scatterwalk.h>
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "qat_algs_send.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
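
/*
 * RSA and DH are registered with the crypto API once for the whole driver:
 * active_devs counts accelerator devices, and only the first registration
 * and the last removal (tracked under algs_lock) touch the crypto API. See
 * qat_asym_algs_register() and qat_asym_algs_unregister() below.
 */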
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

struct qat_rsa_input_params {
	union {
		struct {
			dma_addr_t m;
			dma_addr_t e;
			dma_addr_t n;
		} enc;
		struct {
			dma_addr_t c;
			dma_addr_t d;
			dma_addr_t n;
		} dec;
		struct {
			dma_addr_t c;
			dma_addr_t p;
			dma_addr_t q;
			dma_addr_t dp;
			dma_addr_t dq;
			dma_addr_t qinv;
		} dec_crt;
		u64 in_tab[8];
	};
} __packed __aligned(64);

struct qat_rsa_output_params {
	union {
		struct {
			dma_addr_t c;
		} enc;
		struct {
			dma_addr_t m;
		} dec;
		u64 out_tab[8];
	};
} __packed __aligned(64);

struct qat_rsa_ctx {
	char *n;
	char *e;
	char *d;
	char *p;
	char *q;
	char *dp;
	char *dq;
	char *qinv;
	dma_addr_t dma_n;
	dma_addr_t dma_e;
	dma_addr_t dma_d;
	dma_addr_t dma_p;
	dma_addr_t dma_q;
	dma_addr_t dma_dp;
	dma_addr_t dma_dq;
	dma_addr_t dma_qinv;
	unsigned int key_sz;
	bool crt_mode;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);
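
/*
 * Key components are kept in DMA-coherent buffers (dma_alloc_coherent) so
 * that their bus addresses (dma_n, dma_e, ...) can be dropped directly
 * into the firmware's flat input parameter tables without any per-request
 * mapping of the key material.
 */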

struct qat_dh_input_params {
	union {
		struct {
			dma_addr_t b;
			dma_addr_t xa;
			dma_addr_t p;
		} in;
		struct {
			dma_addr_t xa;
			dma_addr_t p;
		} in_g2;
		u64 in_tab[8];
	};
} __packed __aligned(64);

struct qat_dh_output_params {
	union {
		dma_addr_t r;
		u64 out_tab[8];
	};
} __packed __aligned(64);

struct qat_dh_ctx {
	char *g;
	char *xa;
	char *p;
	dma_addr_t dma_g;
	dma_addr_t dma_xa;
	dma_addr_t dma_p;
	unsigned int p_size;
	bool g2;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);
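
/*
 * g2 is set by qat_dh_set_params() when the group generator is exactly 2;
 * public key generation can then use the dedicated PKE_DH_G2_* firmware
 * services and skip mapping g altogether.
 */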

struct qat_asym_request {
	union {
		struct qat_rsa_input_params rsa;
		struct qat_dh_input_params dh;
	} in;
	union {
		struct qat_rsa_output_params rsa;
		struct qat_dh_output_params dh;
	} out;
	dma_addr_t phy_in;
	dma_addr_t phy_out;
	char *src_align;
	char *dst_align;
	struct icp_qat_fw_pke_request req;
	union {
		struct qat_rsa_ctx *rsa;
		struct qat_dh_ctx *dh;
	} ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
	} areq;
	int err;
	void (*cb)(struct icp_qat_fw_pke_resp *resp);
	struct qat_alg_req alg_req;
} __aligned(64);
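
/*
 * A qat_asym_request lives in the crypto request's private context area;
 * both algorithms declare .reqsize = sizeof(struct qat_asym_request) + 64
 * so that PTR_ALIGN(..., 64) in the compute paths below can produce the
 * 64-byte alignment this structure requires.
 */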

static int qat_alg_send_asym_message(struct qat_asym_request *qat_req,
				     struct qat_crypto_instance *inst,
				     struct crypto_async_request *base)
{
	struct qat_alg_req *alg_req = &qat_req->alg_req;

	alg_req->fw_req = (u32 *)&qat_req->req;
	alg_req->tx_ring = inst->pke_tx;
	alg_req->base = base;
	alg_req->backlog = &inst->backlog;

	return qat_alg_send_message(alg_req);
}
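
/*
 * qat_alg_send_message() (qat_algs_send.c) places the request on the PKE
 * TX ring, queuing it on the instance backlog when the ring is full and
 * the request may backlog. A -ENOSPC return means the message was not and
 * will not be sent, which is why the compute paths below unmap everything
 * on that code.
 */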

static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct kpp_request *areq = req->areq.dh;
	struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (areq->src) {
		dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size,
				 DMA_TO_DEVICE);
		kfree_sensitive(req->src_align);
	}

	areq->dst_len = req->ctx.dh->p_size;
	if (req->dst_align) {
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);
		kfree_sensitive(req->dst_align);
	}

	dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
			 DMA_FROM_DEVICE);

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_dh_output_params),
			 DMA_TO_DEVICE);

	kpp_request_complete(areq, err);
}

#define PKE_DH_1536 0x390c1a49
#define PKE_DH_G2_1536 0x2e0b1a3e
#define PKE_DH_2048 0x4d0c1a60
#define PKE_DH_G2_2048 0x3e0b1a55
#define PKE_DH_3072 0x510c1a77
#define PKE_DH_G2_3072 0x3a0b1a6c
#define PKE_DH_4096 0x690c1a8e
#define PKE_DH_G2_4096 0x4a0b1a83
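
/*
 * The values above (and the PKE_RSA_* values below) are opaque,
 * firmware-defined service identifiers loaded into cd_pars.func_id: one
 * per operation and modulus size, with the _G2_ variants selecting the
 * generator == 2 fast path. E.g. a 2048-bit group with g == 2 maps to
 * PKE_DH_G2_2048.
 */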

static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 1536:
		return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
	case 2048:
		return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
	case 3072:
		return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
	case 4096:
		return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
	default:
		return 0;
	}
}

static int qat_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(kpp_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	gfp_t flags = qat_algs_alloc_flags(&req->base);
	int n_input_params = 0;
	u8 *vaddr;
	int ret;

	if (unlikely(!ctx->xa))
		return -EINVAL;

	if (req->dst_len < ctx->p_size) {
		req->dst_len = ctx->p_size;
		return -EOVERFLOW;
	}

	if (req->src_len > ctx->p_size)
		return -EINVAL;

	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);

	msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
						    !req->src && ctx->g2);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_dh_cb;
	qat_req->ctx.dh = ctx;
	qat_req->areq.dh = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	/*
	 * If no source is provided, use g as the base (or the dedicated
	 * g == 2 firmware service when ctx->g2 is set).
	 */
	if (req->src) {
		qat_req->in.dh.in.xa = ctx->dma_xa;
		qat_req->in.dh.in.p = ctx->dma_p;
		n_input_params = 3;
	} else {
		if (ctx->g2) {
			qat_req->in.dh.in_g2.xa = ctx->dma_xa;
			qat_req->in.dh.in_g2.p = ctx->dma_p;
			n_input_params = 2;
		} else {
			qat_req->in.dh.in.b = ctx->dma_g;
			qat_req->in.dh.in.xa = ctx->dma_xa;
			qat_req->in.dh.in.p = ctx->dma_p;
			n_input_params = 3;
		}
	}

	ret = -ENOMEM;
	if (req->src) {
		/*
		 * src can be any size in the valid range, but the HW expects
		 * it to be the same size as the modulus p, so if the sizes
		 * differ, allocate a zero-padded bounce buffer and copy the
		 * src data into it. Otherwise just map the user-provided
		 * buffer, which must be contiguous.
		 */
		if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
			qat_req->src_align = NULL;
			vaddr = sg_virt(req->src);
		} else {
			int shift = ctx->p_size - req->src_len;

			qat_req->src_align = kzalloc(ctx->p_size, flags);
			if (unlikely(!qat_req->src_align))
				return ret;

			scatterwalk_map_and_copy(qat_req->src_align + shift,
						 req->src, 0, req->src_len, 0);

			vaddr = qat_req->src_align;
		}

		qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size,
						     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b)))
			goto unmap_src;
	}
	/*
	 * dst can be any size in the valid range, but the HW expects it to
	 * be the same size as the modulus p, so if the sizes differ,
	 * allocate a bounce buffer that the callback copies back to dst on
	 * completion. Otherwise just map the user-provided buffer, which
	 * must be contiguous.
	 */
	if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
		qat_req->dst_align = NULL;
		vaddr = sg_virt(req->dst);
	} else {
		qat_req->dst_align = kzalloc(ctx->p_size, flags);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;

		vaddr = qat_req->dst_align;
	}

	qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size,
					   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
		goto unmap_dst;

	qat_req->in.dh.in_tab[n_input_params] = 0;
	qat_req->out.dh.out_tab[1] = 0;
	/* Mapping in.in.b or in.in_g2.xa is the same */
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh,
					 sizeof(struct qat_dh_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh,
					  sizeof(struct qat_dh_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (u64)(__force long)qat_req;
	msg->input_param_count = n_input_params;
	msg->output_param_count = 1;

	ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
	if (ret == -ENOSPC)
		goto unmap_all;

	return ret;

unmap_all:
	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_dh_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_dh_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (!dma_mapping_error(dev, qat_req->out.dh.r))
		dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
				 DMA_FROM_DEVICE);
	kfree_sensitive(qat_req->dst_align);
unmap_src:
	if (req->src) {
		if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
			dma_unmap_single(dev, qat_req->in.dh.in.b,
					 ctx->p_size,
					 DMA_TO_DEVICE);
		kfree_sensitive(qat_req->src_align);
	}
	return ret;
}
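
/*
 * A minimal sketch of how a kernel user would reach qat_dh_compute_value()
 * through the generic KPP API (error handling elided; the secret buffer is
 * a crypto_dh_encode_key() packed key):
 *
 *	struct crypto_kpp *tfm = crypto_alloc_kpp("dh", 0, 0);
 *	struct kpp_request *kreq = kpp_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_kpp_set_secret(tfm, packed_key, packed_key_len);
 *	kpp_request_set_input(kreq, NULL, 0);	    // no src: g^xa mod p
 *	kpp_request_set_output(kreq, &dst_sg, dst_len);
 *	crypto_kpp_generate_public_key(kreq);
 *
 * Passing a source scatterlist instead computes b^xa mod p, i.e. the
 * shared secret via crypto_kpp_compute_shared_secret().
 */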

static int qat_dh_check_params_length(unsigned int p_len)
{
	switch (p_len) {
	case 1536:
	case 2048:
	case 3072:
	case 4096:
		return 0;
	}
	return -EINVAL;
}

static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);

	if (qat_dh_check_params_length(params->p_size << 3))
		return -EINVAL;

	ctx->p_size = params->p_size;
	ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		return -ENOMEM;
	memcpy(ctx->p, params->p, ctx->p_size);

	/* If g equals 2 don't copy it */
	if (params->g_size == 1 && *(char *)params->g == 0x02) {
		ctx->g2 = true;
		return 0;
	}

	ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
	if (!ctx->g)
		return -ENOMEM;
	memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
	       params->g_size);

	return 0;
}

static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
{
	if (ctx->g) {
		memset(ctx->g, 0, ctx->p_size);
		dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
		ctx->g = NULL;
	}
	if (ctx->xa) {
		memset(ctx->xa, 0, ctx->p_size);
		dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
		ctx->xa = NULL;
	}
	if (ctx->p) {
		memset(ctx->p, 0, ctx->p_size);
		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
		ctx->p = NULL;
	}
	ctx->p_size = 0;
	ctx->g2 = false;
}

static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			     unsigned int len)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	qat_dh_clear_ctx(dev, ctx);

	ret = qat_dh_set_params(ctx, &params);
	if (ret < 0)
		goto err_clear_ctx;

	ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
				     GFP_KERNEL);
	if (!ctx->xa) {
		ret = -ENOMEM;
		goto err_clear_ctx;
	}
	memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
	       params.key_size);

	return 0;

err_clear_ctx:
	qat_dh_clear_ctx(dev, ctx);
	return ret;
}

static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->p_size;
}

static int qat_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(numa_node_id());

	if (!inst)
		return -EINVAL;

	ctx->p_size = 0;
	ctx->g2 = false;
	ctx->inst = inst;
	return 0;
}

static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	qat_dh_clear_ctx(dev, ctx);
	qat_crypto_put_instance(ctx->inst);
}

static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct akcipher_request *areq = req->areq.rsa;
	struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	kfree_sensitive(req->src_align);

	dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
			 DMA_TO_DEVICE);

	areq->dst_len = req->ctx.rsa->key_sz;
	if (req->dst_align) {
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		kfree_sensitive(req->dst_align);
	}

	dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
			 DMA_FROM_DEVICE);

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_rsa_output_params),
			 DMA_TO_DEVICE);

	akcipher_request_complete(areq, err);
}

void qat_alg_asym_callback(void *_resp)
{
	struct icp_qat_fw_pke_resp *resp = _resp;
	struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
	struct qat_instance_backlog *backlog = areq->alg_req.backlog;

	areq->cb(resp);

	qat_alg_send_backlog(backlog);
}
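
/*
 * qat_alg_asym_callback() is the response handler for the PKE service:
 * the opaque field carries the qat_asym_request pointer back from the
 * firmware, areq->cb dispatches to qat_rsa_cb() or qat_dh_cb(), and any
 * backlogged requests are then resubmitted.
 */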

#define PKE_RSA_EP_512 0x1c161b21
#define PKE_RSA_EP_1024 0x35111bf7
#define PKE_RSA_EP_1536 0x4d111cdc
#define PKE_RSA_EP_2048 0x6e111dba
#define PKE_RSA_EP_3072 0x7d111ea3
#define PKE_RSA_EP_4096 0xa5101f7e

static unsigned long qat_rsa_enc_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_EP_512;
	case 1024:
		return PKE_RSA_EP_1024;
	case 1536:
		return PKE_RSA_EP_1536;
	case 2048:
		return PKE_RSA_EP_2048;
	case 3072:
		return PKE_RSA_EP_3072;
	case 4096:
		return PKE_RSA_EP_4096;
	default:
		return 0;
	}
}

#define PKE_RSA_DP1_512 0x1c161b3c
#define PKE_RSA_DP1_1024 0x35111c12
#define PKE_RSA_DP1_1536 0x4d111cf7
#define PKE_RSA_DP1_2048 0x6e111dda
#define PKE_RSA_DP1_3072 0x7d111ebe
#define PKE_RSA_DP1_4096 0xa5101f98

static unsigned long qat_rsa_dec_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP1_512;
	case 1024:
		return PKE_RSA_DP1_1024;
	case 1536:
		return PKE_RSA_DP1_1536;
	case 2048:
		return PKE_RSA_DP1_2048;
	case 3072:
		return PKE_RSA_DP1_3072;
	case 4096:
		return PKE_RSA_DP1_4096;
	default:
		return 0;
	}
}

#define PKE_RSA_DP2_512 0x1c131b57
#define PKE_RSA_DP2_1024 0x26131c2d
#define PKE_RSA_DP2_1536 0x45111d12
#define PKE_RSA_DP2_2048 0x59121dfa
#define PKE_RSA_DP2_3072 0x81121ed9
#define PKE_RSA_DP2_4096 0xb1111fb2

static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP2_512;
	case 1024:
		return PKE_RSA_DP2_1024;
	case 1536:
		return PKE_RSA_DP2_1536;
	case 2048:
		return PKE_RSA_DP2_2048;
	case 3072:
		return PKE_RSA_DP2_3072;
	case 4096:
		return PKE_RSA_DP2_4096;
	default:
		return 0;
	}
}

static int qat_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	gfp_t flags = qat_algs_alloc_flags(&req->base);
	u8 *vaddr;
	int ret;

	if (unlikely(!ctx->n || !ctx->e))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}

	if (req->src_len > ctx->key_sz)
		return -EINVAL;

	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	qat_req->in.rsa.enc.e = ctx->dma_e;
	qat_req->in.rsa.enc.n = ctx->dma_n;
	ret = -ENOMEM;

	/*
	 * src can be any size in the valid range, but the HW expects it to
	 * be the same size as the modulus n, so if the sizes differ,
	 * allocate a zero-padded bounce buffer and copy the src data into
	 * it. Otherwise just map the user-provided buffer, which must be
	 * contiguous.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		vaddr = sg_virt(req->src);
	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = kzalloc(ctx->key_sz, flags);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
		vaddr = qat_req->src_align;
	}

	qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz,
					       DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
		goto unmap_src;

	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		vaddr = sg_virt(req->dst);
	} else {
		qat_req->dst_align = kzalloc(ctx->key_sz, flags);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
		vaddr = qat_req->dst_align;
	}

	qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz,
						DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
		goto unmap_dst;

	qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (u64)(__force long)qat_req;
	msg->input_param_count = 3;
	msg->output_param_count = 1;

	ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
	if (ret == -ENOSPC)
		goto unmap_all;

	return ret;

unmap_all:
	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
		dma_unmap_single(dev, qat_req->out.rsa.enc.c,
				 ctx->key_sz, DMA_FROM_DEVICE);
	kfree_sensitive(qat_req->dst_align);
unmap_src:
	if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
		dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz,
				 DMA_TO_DEVICE);
	kfree_sensitive(qat_req->src_align);
	return ret;
}
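
/*
 * A minimal sketch of driving qat_rsa_enc() through the generic akcipher
 * API (error handling elided; the key is a BER-encoded RsaKey as parsed
 * by rsa_parse_pub_key()):
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	struct akcipher_request *areq = akcipher_request_alloc(tfm,
 *							       GFP_KERNEL);
 *
 *	crypto_akcipher_set_pub_key(tfm, ber_key, ber_key_len);
 *	akcipher_request_set_crypt(areq, &src_sg, &dst_sg, src_len,
 *				   crypto_akcipher_maxsize(tfm));
 *	crypto_akcipher_encrypt(areq);
 */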

static int qat_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	gfp_t flags = qat_algs_alloc_flags(&req->base);
	u8 *vaddr;
	int ret;

	if (unlikely(!ctx->n || !ctx->d))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}

	if (req->src_len > ctx->key_sz)
		return -EINVAL;

	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
		qat_rsa_dec_fn_id_crt(ctx->key_sz) :
		qat_rsa_dec_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	if (ctx->crt_mode) {
		qat_req->in.rsa.dec_crt.p = ctx->dma_p;
		qat_req->in.rsa.dec_crt.q = ctx->dma_q;
		qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
		qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
		qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
	} else {
		qat_req->in.rsa.dec.d = ctx->dma_d;
		qat_req->in.rsa.dec.n = ctx->dma_n;
	}
	ret = -ENOMEM;

	/*
	 * src can be any size in the valid range, but the HW expects it to
	 * be the same size as the modulus n, so if the sizes differ,
	 * allocate a zero-padded bounce buffer and copy the src data into
	 * it. Otherwise just map the user-provided buffer, which must be
	 * contiguous.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		vaddr = sg_virt(req->src);
	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = kzalloc(ctx->key_sz, flags);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
		vaddr = qat_req->src_align;
	}

	qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz,
					       DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
		goto unmap_src;

	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		vaddr = sg_virt(req->dst);
	} else {
		qat_req->dst_align = kzalloc(ctx->key_sz, flags);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
		vaddr = qat_req->dst_align;
	}

	qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz,
						DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
		goto unmap_dst;

	if (ctx->crt_mode)
		qat_req->in.rsa.in_tab[6] = 0;
	else
		qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (u64)(__force long)qat_req;
	if (ctx->crt_mode)
		msg->input_param_count = 6;
	else
		msg->input_param_count = 3;

	msg->output_param_count = 1;

	ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
	if (ret == -ENOSPC)
		goto unmap_all;

	return ret;

unmap_all:
	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
		dma_unmap_single(dev, qat_req->out.rsa.dec.m,
				 ctx->key_sz, DMA_FROM_DEVICE);
	kfree_sensitive(qat_req->dst_align);
unmap_src:
	if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
		dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz,
				 DMA_TO_DEVICE);
	kfree_sensitive(qat_req->src_align);
	return ret;
}

static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
			 size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ctx->key_sz = vlen;
	ret = -EINVAL;
	/* invalid key size provided */
	if (!qat_rsa_enc_fn_id(ctx->key_sz))
		goto err;

	ret = -ENOMEM;
	ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
	if (!ctx->n)
		goto err;

	memcpy(ctx->n, ptr, ctx->key_sz);
	return 0;
err:
	ctx->key_sz = 0;
	ctx->n = NULL;
	return ret;
}

static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
			 size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
		ctx->e = NULL;
		return -EINVAL;
	}

	ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
	if (!ctx->e)
		return -ENOMEM;

	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
}

static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
			 size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ret = -EINVAL;
	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		goto err;

	ret = -ENOMEM;
	ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
	if (!ctx->d)
		goto err;

	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
err:
	ctx->d = NULL;
	return ret;
}

static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
{
	while (!**ptr && *len) {
		(*ptr)++;
		(*len)--;
	}
}

static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr;
	unsigned int len;
	unsigned int half_key_sz = ctx->key_sz / 2;

	/* p */
	ptr = rsa_key->p;
	len = rsa_key->p_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto err;
	ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		goto err;
	memcpy(ctx->p + (half_key_sz - len), ptr, len);

	/* q */
	ptr = rsa_key->q;
	len = rsa_key->q_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_p;
	ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
	if (!ctx->q)
		goto free_p;
	memcpy(ctx->q + (half_key_sz - len), ptr, len);

	/* dp */
	ptr = rsa_key->dp;
	len = rsa_key->dp_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_q;
	ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
				     GFP_KERNEL);
	if (!ctx->dp)
		goto free_q;
	memcpy(ctx->dp + (half_key_sz - len), ptr, len);

	/* dq */
	ptr = rsa_key->dq;
	len = rsa_key->dq_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_dp;
	ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
				     GFP_KERNEL);
	if (!ctx->dq)
		goto free_dp;
	memcpy(ctx->dq + (half_key_sz - len), ptr, len);

	/* qinv */
	ptr = rsa_key->qinv;
	len = rsa_key->qinv_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_dq;
	ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
				       GFP_KERNEL);
	if (!ctx->qinv)
		goto free_dq;
	memcpy(ctx->qinv + (half_key_sz - len), ptr, len);

	ctx->crt_mode = true;
	return;

free_dq:
	memset(ctx->dq, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
	ctx->dq = NULL;
free_dp:
	memset(ctx->dp, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
	ctx->dp = NULL;
free_q:
	memset(ctx->q, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
	ctx->q = NULL;
free_p:
	memset(ctx->p, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
	ctx->p = NULL;
err:
	ctx->crt_mode = false;
}
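
/*
 * Note that qat_rsa_setkey_crt() intentionally returns void: if any CRT
 * component is missing or cannot be allocated, it unwinds and clears
 * crt_mode, and qat_rsa_dec() simply falls back to the plain d/n private
 * key operation.
 */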

static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
{
	unsigned int half_key_sz = ctx->key_sz / 2;

	/* Free the old key if any */
	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	if (ctx->p) {
		memset(ctx->p, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
	}
	if (ctx->q) {
		memset(ctx->q, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
	}
	if (ctx->dp) {
		memset(ctx->dp, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
	}
	if (ctx->dq) {
		memset(ctx->dq, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
	}
	if (ctx->qinv) {
		memset(ctx->qinv, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
	}

	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
	ctx->p = NULL;
	ctx->q = NULL;
	ctx->dp = NULL;
	ctx->dq = NULL;
	ctx->qinv = NULL;
	ctx->crt_mode = false;
	ctx->key_sz = 0;
}

static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
			  unsigned int keylen, bool private)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct rsa_key rsa_key;
	int ret;

	qat_rsa_clear_ctx(dev, ctx);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		goto free;

	ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
	if (ret < 0)
		goto free;
	ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;
	if (private) {
		ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;
		qat_rsa_setkey_crt(ctx, &rsa_key);
	}

	if (!ctx->n || !ctx->e) {
		/* invalid key provided */
		ret = -EINVAL;
		goto free;
	}
	if (private && !ctx->d) {
		/* invalid private key provided */
		ret = -EINVAL;
		goto free;
	}

	return 0;
free:
	qat_rsa_clear_ctx(dev, ctx);
	return ret;
}

static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			     unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, false);
}

static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, true);
}

static unsigned int qat_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(numa_node_id());

	if (!inst)
		return -EINVAL;

	ctx->key_sz = 0;
	ctx->inst = inst;
	return 0;
}

static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	qat_rsa_clear_ctx(dev, ctx);
	qat_crypto_put_instance(ctx->inst);
}

static struct akcipher_alg rsa = {
	.encrypt = qat_rsa_enc,
	.decrypt = qat_rsa_dec,
	.set_pub_key = qat_rsa_setpubkey,
	.set_priv_key = qat_rsa_setprivkey,
	.max_size = qat_rsa_max_size,
	.init = qat_rsa_init_tfm,
	.exit = qat_rsa_exit_tfm,
	.reqsize = sizeof(struct qat_asym_request) + 64,
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "qat-rsa",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_rsa_ctx),
	},
};

static struct kpp_alg dh = {
	.set_secret = qat_dh_set_secret,
	.generate_public_key = qat_dh_compute_value,
	.compute_shared_secret = qat_dh_compute_value,
	.max_size = qat_dh_max_size,
	.init = qat_dh_init_tfm,
	.exit = qat_dh_exit_tfm,
	.reqsize = sizeof(struct qat_asym_request) + 64,
	.base = {
		.cra_name = "dh",
		.cra_driver_name = "qat-dh",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_dh_ctx),
	},
};
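
/*
 * A cra_priority of 1000 places these implementations well above the
 * generic software RSA and DH (priority 100), so the crypto API picks
 * them by default whenever a QAT device is available.
 */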

int qat_asym_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs == 1) {
		rsa.base.cra_flags = 0;
		ret = crypto_register_akcipher(&rsa);
		if (ret)
			goto unlock;
		ret = crypto_register_kpp(&dh);
	}
unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

void qat_asym_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs == 0) {
		crypto_unregister_akcipher(&rsa);
		crypto_unregister_kpp(&dh);
	}
	mutex_unlock(&algs_lock);
}