/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <linux/module.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/internal/kpp.h>
#include <crypto/dh.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <crypto/scatterwalk.h>
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

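/*
 * Firmware input/output parameter blocks. The union members name the
 * per-operation DMA addresses; the firmware consumes them as a flat
 * table of up to eight 64-bit pointers (in_tab/out_tab), and the entry
 * after the last used parameter is zeroed before the table is mapped.
 */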
struct qat_rsa_input_params {
	union {
		struct {
			dma_addr_t m;
			dma_addr_t e;
			dma_addr_t n;
		} enc;
		struct {
			dma_addr_t c;
			dma_addr_t d;
			dma_addr_t n;
		} dec;
		struct {
			dma_addr_t c;
			dma_addr_t p;
			dma_addr_t q;
			dma_addr_t dp;
			dma_addr_t dq;
			dma_addr_t qinv;
		} dec_crt;
		u64 in_tab[8];
	};
} __packed __aligned(64);

struct qat_rsa_output_params {
	union {
		struct {
			dma_addr_t c;
		} enc;
		struct {
			dma_addr_t m;
		} dec;
		u64 out_tab[8];
	};
} __packed __aligned(64);

struct qat_rsa_ctx {
	char *n;
	char *e;
	char *d;
	char *p;
	char *q;
	char *dp;
	char *dq;
	char *qinv;
	dma_addr_t dma_n;
	dma_addr_t dma_e;
	dma_addr_t dma_d;
	dma_addr_t dma_p;
	dma_addr_t dma_q;
	dma_addr_t dma_dp;
	dma_addr_t dma_dq;
	dma_addr_t dma_qinv;
	unsigned int key_sz;
	bool crt_mode;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);

struct qat_dh_input_params {
	union {
		struct {
			dma_addr_t b;
			dma_addr_t xa;
			dma_addr_t p;
		} in;
		struct {
			dma_addr_t xa;
			dma_addr_t p;
		} in_g2;
		u64 in_tab[8];
	};
} __packed __aligned(64);

struct qat_dh_output_params {
	union {
		dma_addr_t r;
		u64 out_tab[8];
	};
} __packed __aligned(64);

struct qat_dh_ctx {
	char *g;
	char *xa;
	char *p;
	dma_addr_t dma_g;
	dma_addr_t dma_xa;
	dma_addr_t dma_p;
	unsigned int p_size;
	bool g2;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);

struct qat_asym_request {
	union {
		struct qat_rsa_input_params rsa;
		struct qat_dh_input_params dh;
	} in;
	union {
		struct qat_rsa_output_params rsa;
		struct qat_dh_output_params dh;
	} out;
	dma_addr_t phy_in;
	dma_addr_t phy_out;
	char *src_align;
	char *dst_align;
	struct icp_qat_fw_pke_request req;
	union {
		struct qat_rsa_ctx *rsa;
		struct qat_dh_ctx *dh;
	} ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
	} areq;
	int err;
	void (*cb)(struct icp_qat_fw_pke_resp *resp);
} __aligned(64);

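/*
 * Completion callback for DH requests: release the bounce buffers or
 * DMA mappings set up in qat_dh_compute_value(), copy the result back
 * to the caller's scatterlist if a bounce buffer was used, and
 * complete the kpp request.
 */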
static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct kpp_request *areq = req->areq.dh;
	struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
			resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (areq->src) {
		if (req->src_align)
			dma_free_coherent(dev, req->ctx.dh->p_size,
					  req->src_align, req->in.dh.in.b);
		else
			dma_unmap_single(dev, req->in.dh.in.b,
					 req->ctx.dh->p_size, DMA_TO_DEVICE);
	}

	areq->dst_len = req->ctx.dh->p_size;
	if (req->dst_align) {
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
				  req->out.dh.r);
	} else {
		dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
				 DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_dh_output_params),
			 DMA_TO_DEVICE);

	kpp_request_complete(areq, err);
}

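/*
 * DH firmware function IDs, selected by modulus bit length; the _G2
 * variants are used when the base is the constant 2.
 */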
#define PKE_DH_1536 0x390c1a49
#define PKE_DH_G2_1536 0x2e0b1a3e
#define PKE_DH_2048 0x4d0c1a60
#define PKE_DH_G2_2048 0x3e0b1a55
#define PKE_DH_3072 0x510c1a77
#define PKE_DH_G2_3072 0x3a0b1a6c
#define PKE_DH_4096 0x690c1a8e
#define PKE_DH_G2_4096 0x4a0b1a83

static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 1536:
		return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
	case 2048:
		return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
	case 3072:
		return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
	case 4096:
		return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
	default:
		return 0;
	}
}

static inline struct qat_dh_ctx *qat_dh_get_params(struct crypto_kpp *tfm)
{
	return kpp_tfm_ctx(tfm);
}

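/*
 * Build and fire a PKE request for DH: pick the firmware function ID,
 * map (or bounce-copy) the source and destination, map the parameter
 * tables, and post the message to the PKE ring. On success the request
 * completes asynchronously in qat_dh_cb(); the error path below unwinds
 * whatever mappings were already set up.
 */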
static int qat_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(kpp_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;
	int n_input_params = 0;

	if (unlikely(!ctx->xa))
		return -EINVAL;

	if (req->dst_len < ctx->p_size) {
		req->dst_len = ctx->p_size;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);

	msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
						    !req->src && ctx->g2);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_dh_cb;
	qat_req->ctx.dh = ctx;
	qat_req->areq.dh = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	/*
	 * If no source is provided, use g as the base.
	 */
	if (req->src) {
		qat_req->in.dh.in.xa = ctx->dma_xa;
		qat_req->in.dh.in.p = ctx->dma_p;
		n_input_params = 3;
	} else {
		if (ctx->g2) {
			qat_req->in.dh.in_g2.xa = ctx->dma_xa;
			qat_req->in.dh.in_g2.p = ctx->dma_p;
			n_input_params = 2;
		} else {
			qat_req->in.dh.in.b = ctx->dma_g;
			qat_req->in.dh.in.xa = ctx->dma_xa;
			qat_req->in.dh.in.p = ctx->dma_p;
			n_input_params = 3;
		}
	}

	ret = -ENOMEM;
	if (req->src) {
		/*
		 * src can be of any size in the valid range, but the HW
		 * expects it to be the same size as the modulus p, so if it
		 * differs we need to allocate a new buffer and copy the src
		 * data into it. Otherwise we only need to map the
		 * user-provided buffer. We also need to make sure the data
		 * is in a single contiguous buffer.
		 */
		if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
			qat_req->src_align = NULL;
			qat_req->in.dh.in.b = dma_map_single(dev,
							     sg_virt(req->src),
							     req->src_len,
							     DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(dev,
						       qat_req->in.dh.in.b)))
				return ret;

		} else {
			int shift = ctx->p_size - req->src_len;

			qat_req->src_align = dma_zalloc_coherent(dev,
								 ctx->p_size,
								 &qat_req->in.dh.in.b,
								 GFP_KERNEL);
			if (unlikely(!qat_req->src_align))
				return ret;

			scatterwalk_map_and_copy(qat_req->src_align + shift,
						 req->src, 0, req->src_len, 0);
		}
	}
	/*
	 * dst can be of any size in the valid range, but the HW expects it
	 * to be the same size as the modulus p, so if it differs we need to
	 * allocate a new buffer and copy the result back to the caller when
	 * the request completes. Otherwise we only need to map the
	 * user-provided buffer. We also need to make sure the data is in a
	 * single contiguous buffer.
	 */
	if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
		qat_req->dst_align = NULL;
		qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
						   req->dst_len,
						   DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
							 &qat_req->out.dh.r,
							 GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
	}

	qat_req->in.dh.in_tab[n_input_params] = 0;
	qat_req->out.dh.out_tab[1] = 0;
	/* Mapping in.in.b or in.in_g2.xa is the same */
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
					 sizeof(struct qat_dh_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
					  sizeof(struct qat_dh_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
	msg->input_param_count = n_input_params;
	msg->output_param_count = 1;

	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_dh_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_dh_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
				  qat_req->out.dh.r);
	else
		if (!dma_mapping_error(dev, qat_req->out.dh.r))
			dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
					 DMA_FROM_DEVICE);
unmap_src:
	if (req->src) {
		if (qat_req->src_align)
			dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
					  qat_req->in.dh.in.b);
		else
			if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
				dma_unmap_single(dev, qat_req->in.dh.in.b,
						 ctx->p_size,
						 DMA_TO_DEVICE);
	}
	return ret;
}

static int qat_dh_check_params_length(unsigned int p_len)
{
	switch (p_len) {
	case 1536:
	case 2048:
	case 3072:
	case 4096:
		return 0;
	}
	return -EINVAL;
}

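/*
 * Copy the DH domain parameters into DMA-coherent buffers. p defines
 * p_size; g is stored right-aligned in a p_size buffer unless it is
 * the constant 2, in which case the dedicated G2 function IDs let us
 * skip passing a base altogether.
 */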
static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);

	if (unlikely(!params->p || !params->g))
		return -EINVAL;

	if (qat_dh_check_params_length(params->p_size << 3))
		return -EINVAL;

	ctx->p_size = params->p_size;
	ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		return -ENOMEM;
	memcpy(ctx->p, params->p, ctx->p_size);

	/* If g equals 2, don't copy it */
	if (params->g_size == 1 && *(char *)params->g == 0x02) {
		ctx->g2 = true;
		return 0;
	}

	ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
	if (!ctx->g) {
		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
		ctx->p = NULL;
		return -ENOMEM;
	}
	memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
	       params->g_size);

	return 0;
}

static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
{
	if (ctx->g) {
		dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
		ctx->g = NULL;
	}
	if (ctx->xa) {
		dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
		ctx->xa = NULL;
	}
	if (ctx->p) {
		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
		ctx->p = NULL;
	}
	ctx->p_size = 0;
	ctx->g2 = false;
}

static int qat_dh_set_secret(struct crypto_kpp *tfm, void *buf,
			     unsigned int len)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	qat_dh_clear_ctx(dev, ctx);

	ret = qat_dh_set_params(ctx, &params);
	if (ret < 0)
		return ret;

	ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
				      GFP_KERNEL);
	if (!ctx->xa) {
		qat_dh_clear_ctx(dev, ctx);
		return -ENOMEM;
	}
	memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
	       params.key_size);

	return 0;
}

static int qat_dh_max_size(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->p ? ctx->p_size : -EINVAL;
}

static int qat_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(get_current_node());

	if (!inst)
		return -EINVAL;

	ctx->p_size = 0;
	ctx->g2 = false;
	ctx->inst = inst;
	return 0;
}

static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	qat_dh_clear_ctx(dev, ctx);
	qat_crypto_put_instance(ctx->inst);
}

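/*
 * Completion callback for RSA requests. Besides releasing the DMA
 * mappings and bounce buffers, this strips leading zero bytes from the
 * firmware output (which is always key_sz bytes long) so that dst_len
 * reports only the significant bytes of the result.
 */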
static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct akcipher_request *areq = req->areq.rsa;
	struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
			resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (req->src_align)
		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
				  req->in.rsa.enc.m);
	else
		dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
				 DMA_TO_DEVICE);

	areq->dst_len = req->ctx.rsa->key_sz;
	if (req->dst_align) {
		char *ptr = req->dst_align;

		while (!(*ptr) && areq->dst_len) {
			areq->dst_len--;
			ptr++;
		}

		if (areq->dst_len != req->ctx.rsa->key_sz)
			memmove(req->dst_align, ptr, areq->dst_len);

		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
				  req->out.rsa.enc.c);
	} else {
		char *ptr = sg_virt(areq->dst);

		while (!(*ptr) && areq->dst_len) {
			areq->dst_len--;
			ptr++;
		}

		if (sg_virt(areq->dst) != ptr && areq->dst_len)
			memmove(sg_virt(areq->dst), ptr, areq->dst_len);

		dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
				 DMA_FROM_DEVICE);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_rsa_output_params),
			 DMA_TO_DEVICE);

	akcipher_request_complete(areq, err);
}

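/*
 * Common response handler for the asym ring: recover the request from
 * the opaque field and dispatch to its RSA or DH completion callback.
 */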
void qat_alg_asym_callback(void *_resp)
{
	struct icp_qat_fw_pke_resp *resp = _resp;
	struct qat_asym_request *areq = (void *)(__force long)resp->opaque;

	areq->cb(resp);
}

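/* RSA public-key operation firmware function IDs, by modulus size. */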
#define PKE_RSA_EP_512 0x1c161b21
#define PKE_RSA_EP_1024 0x35111bf7
#define PKE_RSA_EP_1536 0x4d111cdc
#define PKE_RSA_EP_2048 0x6e111dba
#define PKE_RSA_EP_3072 0x7d111ea3
#define PKE_RSA_EP_4096 0xa5101f7e

static unsigned long qat_rsa_enc_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_EP_512;
	case 1024:
		return PKE_RSA_EP_1024;
	case 1536:
		return PKE_RSA_EP_1536;
	case 2048:
		return PKE_RSA_EP_2048;
	case 3072:
		return PKE_RSA_EP_3072;
	case 4096:
		return PKE_RSA_EP_4096;
	default:
		return 0;
	}
}

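/* RSA private-key (non-CRT) operation firmware function IDs. */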
#define PKE_RSA_DP1_512 0x1c161b3c
#define PKE_RSA_DP1_1024 0x35111c12
#define PKE_RSA_DP1_1536 0x4d111cf7
#define PKE_RSA_DP1_2048 0x6e111dda
#define PKE_RSA_DP1_3072 0x7d111ebe
#define PKE_RSA_DP1_4096 0xa5101f98

static unsigned long qat_rsa_dec_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP1_512;
	case 1024:
		return PKE_RSA_DP1_1024;
	case 1536:
		return PKE_RSA_DP1_1536;
	case 2048:
		return PKE_RSA_DP1_2048;
	case 3072:
		return PKE_RSA_DP1_3072;
	case 4096:
		return PKE_RSA_DP1_4096;
	default:
		return 0;
	}
}

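/* RSA private-key (CRT) operation firmware function IDs. */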
#define PKE_RSA_DP2_512 0x1c131b57
#define PKE_RSA_DP2_1024 0x26131c2d
#define PKE_RSA_DP2_1536 0x45111d12
#define PKE_RSA_DP2_2048 0x59121dfa
#define PKE_RSA_DP2_3072 0x81121ed9
#define PKE_RSA_DP2_4096 0xb1111fb2

static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP2_512;
	case 1024:
		return PKE_RSA_DP2_1024;
	case 1536:
		return PKE_RSA_DP2_1536;
	case 2048:
		return PKE_RSA_DP2_2048;
	case 3072:
		return PKE_RSA_DP2_3072;
	case 4096:
		return PKE_RSA_DP2_4096;
	default:
		return 0;
	}
}

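/*
 * RSA public-key operation (m^e mod n). The request flow mirrors
 * qat_dh_compute_value(): map or bounce-copy src/dst, map the
 * parameter tables, post to the PKE ring, and complete in qat_rsa_cb().
 */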
static int qat_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->e))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	qat_req->in.rsa.enc.e = ctx->dma_e;
	qat_req->in.rsa.enc.n = ctx->dma_n;
	ret = -ENOMEM;

	/*
	 * src can be of any size in the valid range, but the HW expects it
	 * to be the same size as the modulus n, so if it differs we need to
	 * allocate a new buffer and copy the src data into it. Otherwise we
	 * only need to map the user-provided buffer. We also need to make
	 * sure the data is in a single contiguous buffer.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
						       req->src_len,
						       DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->in.rsa.enc.m,
							 GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
							req->dst_len,
							DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->out.rsa.enc.c,
							 GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
	}
	qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
	msg->input_param_count = 3;
	msg->output_param_count = 1;
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.rsa.enc.c);
	else
		if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
			dma_unmap_single(dev, qat_req->out.rsa.enc.c,
					 ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.rsa.enc.m);
	else
		if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
			dma_unmap_single(dev, qat_req->in.rsa.enc.m,
					 ctx->key_sz, DMA_TO_DEVICE);
	return ret;
}

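/*
 * RSA private-key operation. Depending on ctx->crt_mode this is either
 * the plain c^d mod n (three input parameters) or the CRT form taking
 * p, q, dp, dq and qinv (six input parameters).
 */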
static int qat_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->d))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
		qat_rsa_dec_fn_id_crt(ctx->key_sz) :
		qat_rsa_dec_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	if (ctx->crt_mode) {
		qat_req->in.rsa.dec_crt.p = ctx->dma_p;
		qat_req->in.rsa.dec_crt.q = ctx->dma_q;
		qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
		qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
		qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
	} else {
		qat_req->in.rsa.dec.d = ctx->dma_d;
		qat_req->in.rsa.dec.n = ctx->dma_n;
	}
	ret = -ENOMEM;

	/*
	 * src can be of any size in the valid range, but the HW expects it
	 * to be the same size as the modulus n, so if it differs we need to
	 * allocate a new buffer and copy the src data into it. Otherwise we
	 * only need to map the user-provided buffer. We also need to make
	 * sure the data is in a single contiguous buffer.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
						       req->src_len,
						       DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
			return ret;

	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->in.rsa.dec.c,
							 GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
							req->dst_len,
							DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->out.rsa.dec.m,
							 GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
	}

	if (ctx->crt_mode)
		qat_req->in.rsa.in_tab[6] = 0;
	else
		qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
	if (ctx->crt_mode)
		msg->input_param_count = 6;
	else
		msg->input_param_count = 3;

	msg->output_param_count = 1;
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.rsa.dec.m);
	else
		if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
			dma_unmap_single(dev, qat_req->out.rsa.dec.m,
					 ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.rsa.dec.c);
	else
		if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
			dma_unmap_single(dev, qat_req->in.rsa.dec.c,
					 ctx->key_sz, DMA_TO_DEVICE);
	return ret;
}

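/*
 * Key-component setters: each strips leading zero bytes, then copies
 * the value right-aligned into a DMA-coherent buffer. The stripped
 * length of n fixes key_sz and must match a size the firmware supports.
 */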
int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ctx->key_sz = vlen;
	ret = -EINVAL;
	/* invalid key size provided */
	if (!qat_rsa_enc_fn_id(ctx->key_sz))
		goto err;

	ret = -ENOMEM;
	ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
	if (!ctx->n)
		goto err;

	memcpy(ctx->n, ptr, ctx->key_sz);
	return 0;
err:
	ctx->key_sz = 0;
	ctx->n = NULL;
	return ret;
}

int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
		ctx->e = NULL;
		return -EINVAL;
	}

	ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
	if (!ctx->e)
		return -ENOMEM;

	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
}

int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ret = -EINVAL;
	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		goto err;

	ret = -ENOMEM;
	ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
	if (!ctx->d)
		goto err;

	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
err:
	ctx->d = NULL;
	return ret;
}

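/*
 * Load the CRT components (each half the key size, right-aligned). A
 * missing or unallocatable component quietly falls back to non-CRT
 * mode: buffers set so far are zeroed and freed, and crt_mode stays
 * false so qat_rsa_dec() uses the plain private-key path.
 */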
static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
{
	while (!**ptr && *len) {
		(*ptr)++;
		(*len)--;
	}
}

static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr;
	unsigned int len;
	unsigned int half_key_sz = ctx->key_sz / 2;

	/* p */
	ptr = rsa_key->p;
	len = rsa_key->p_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto err;
	ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		goto err;
	memcpy(ctx->p + (half_key_sz - len), ptr, len);

	/* q */
	ptr = rsa_key->q;
	len = rsa_key->q_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_p;
	ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
	if (!ctx->q)
		goto free_p;
	memcpy(ctx->q + (half_key_sz - len), ptr, len);

	/* dp */
	ptr = rsa_key->dp;
	len = rsa_key->dp_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_q;
	ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp,
				      GFP_KERNEL);
	if (!ctx->dp)
		goto free_q;
	memcpy(ctx->dp + (half_key_sz - len), ptr, len);

	/* dq */
	ptr = rsa_key->dq;
	len = rsa_key->dq_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_dp;
	ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq,
				      GFP_KERNEL);
	if (!ctx->dq)
		goto free_dp;
	memcpy(ctx->dq + (half_key_sz - len), ptr, len);

	/* qinv */
	ptr = rsa_key->qinv;
	len = rsa_key->qinv_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_dq;
	ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
					GFP_KERNEL);
	if (!ctx->qinv)
		goto free_dq;
	memcpy(ctx->qinv + (half_key_sz - len), ptr, len);

	ctx->crt_mode = true;
	return;

free_dq:
	memset(ctx->dq, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
	ctx->dq = NULL;
free_dp:
	memset(ctx->dp, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
	ctx->dp = NULL;
free_q:
	memset(ctx->q, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
	ctx->q = NULL;
free_p:
	memset(ctx->p, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
	ctx->p = NULL;
err:
	ctx->crt_mode = false;
}

static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
{
	unsigned int half_key_sz = ctx->key_sz / 2;

	/* Free the old key if any */
	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	if (ctx->p) {
		memset(ctx->p, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
	}
	if (ctx->q) {
		memset(ctx->q, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
	}
	if (ctx->dp) {
		memset(ctx->dp, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
	}
	if (ctx->dq) {
		memset(ctx->dq, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
	}
	if (ctx->qinv) {
		memset(ctx->qinv, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
	}

	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
	ctx->p = NULL;
	ctx->q = NULL;
	ctx->dp = NULL;
	ctx->dq = NULL;
	ctx->qinv = NULL;
	ctx->crt_mode = false;
	ctx->key_sz = 0;
}

static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
			  unsigned int keylen, bool private)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct rsa_key rsa_key;
	int ret;

	qat_rsa_clear_ctx(dev, ctx);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		goto free;

	ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
	if (ret < 0)
		goto free;
	ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;
	if (private) {
		ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;
		qat_rsa_setkey_crt(ctx, &rsa_key);
	}

	if (!ctx->n || !ctx->e) {
		/* invalid key provided */
		ret = -EINVAL;
		goto free;
	}
	if (private && !ctx->d) {
		/* invalid private key provided */
		ret = -EINVAL;
		goto free;
	}

	return 0;
free:
	qat_rsa_clear_ctx(dev, ctx);
	return ret;
}

static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			     unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, false);
}

static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, true);
}

static int qat_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return (ctx->n) ? ctx->key_sz : -EINVAL;
}

static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(get_current_node());

	if (!inst)
		return -EINVAL;

	ctx->key_sz = 0;
	ctx->inst = inst;
	return 0;
}

static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	qat_crypto_put_instance(ctx->inst);
	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
}

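/*
 * sign/verify map onto the raw private/public-key operations; any
 * padding scheme is presumably handled by a wrapping template (e.g.
 * pkcs1pad) layered on top of this driver.
 */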
static struct akcipher_alg rsa = {
	.encrypt = qat_rsa_enc,
	.decrypt = qat_rsa_dec,
	.sign = qat_rsa_dec,
	.verify = qat_rsa_enc,
	.set_pub_key = qat_rsa_setpubkey,
	.set_priv_key = qat_rsa_setprivkey,
	.max_size = qat_rsa_max_size,
	.init = qat_rsa_init_tfm,
	.exit = qat_rsa_exit_tfm,
	.reqsize = sizeof(struct qat_asym_request) + 64,
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "qat-rsa",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_rsa_ctx),
	},
};

static struct kpp_alg dh = {
	.set_secret = qat_dh_set_secret,
	.generate_public_key = qat_dh_compute_value,
	.compute_shared_secret = qat_dh_compute_value,
	.max_size = qat_dh_max_size,
	.init = qat_dh_init_tfm,
	.exit = qat_dh_exit_tfm,
	.reqsize = sizeof(struct qat_asym_request) + 64,
	.base = {
		.cra_name = "dh",
		.cra_driver_name = "qat-dh",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_dh_ctx),
	},
};

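/*
 * Registration is refcounted across accelerator devices: the algs are
 * registered when the first device comes up and unregistered when the
 * last one goes away.
 */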
int qat_asym_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs == 1) {
		rsa.base.cra_flags = 0;
		ret = crypto_register_akcipher(&rsa);
		if (ret)
			goto unlock;
		ret = crypto_register_kpp(&dh);
	}
unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

void qat_asym_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs == 0) {
		crypto_unregister_akcipher(&rsa);
		crypto_unregister_kpp(&dh);
	}
	mutex_unlock(&algs_lock);
}