/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 *
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "mv_cesa.h"

#define MV_CESA	"MV-CESA:"
#define MAX_HW_HASH_SIZE	0xFFFF

/*
 * STM:
 *   /---------------------------------------\
 *   |  request complete                     |
 *  \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                          /°\              |
 *                           |               | more scatter entries
 *                           \_______________/
 */
enum engine_status {
	ENGINE_IDLE,
	ENGINE_BUSY,
	ENGINE_W_DEQUEUE,
};

/**
 * struct req_progress - used for every crypt request
 * @src_sg_it:		sg iterator for src
 * @dst_sg_it:		sg iterator for dst
 * @sg_src_left:	bytes left in src to process (scatter list)
 * @src_start:		offset to add to src start position (scatter list)
 * @crypt_len:		length of current hw crypt/hash process
 * @hw_nbytes:		total bytes to process in hw for this request
 * @copy_back:		whether to copy data back (crypt) or not (hash)
 * @sg_dst_left:	bytes left in dst to process in this scatter list
 * @dst_start:		offset to add to dst start position (scatter list)
 * @hw_processed_bytes:	number of bytes processed by hw (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of
 * the SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
struct req_progress {
	struct sg_mapping_iter src_sg_it;
	struct sg_mapping_iter dst_sg_it;
	void (*complete) (void);
	void (*process) (int is_first);

	/* src mostly */
	int sg_src_left;
	int src_start;
	int crypt_len;
	int hw_nbytes;
	/* dst mostly */
	int copy_back;
	int sg_dst_left;
	int dst_start;
	int hw_processed_bytes;
};

struct crypto_priv {
	void __iomem *reg;
	void __iomem *sram;
	int irq;
	struct task_struct *queue_th;

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct crypto_async_request *cur_req;
	struct req_progress p;
	int max_req_size;
	int sram_size;
	int has_sha1;
	int has_hmac_sha1;
};

static struct crypto_priv *cpg;

struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;
	u32 need_calc_aes_dkey;
};

enum crypto_op {
	COP_AES_ECB,
	COP_AES_CBC,
};

struct mv_req_ctx {
	enum crypto_op op;
	int decrypt;
};

enum hash_op {
	COP_SHA1,
	COP_HMAC_SHA1
};

struct mv_tfm_hash_ctx {
	struct crypto_shash *fallback;
	struct crypto_shash *base_hash;
	u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
	int count_add;
	enum hash_op op;
};

struct mv_req_hash_ctx {
	u64 count;
	u32 state[SHA1_DIGEST_SIZE / 4];
	u8 buffer[SHA1_BLOCK_SIZE];
	int first_hash;		/* marks that we don't have previous state */
	int last_chunk;		/* marks that this is the 'final' request */
	int extra_bytes;	/* unprocessed bytes in buffer */
	enum hash_op op;
	int count_add;
	struct scatterlist dummysg;
};

static void compute_aes_dec_key(struct mv_ctx *ctx)
{
	struct crypto_aes_ctx gen_aes_key;
	int key_pos;

	if (!ctx->need_calc_aes_dkey)
		return;

	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

	key_pos = ctx->key_len + 24;
	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
	switch (ctx->key_len) {
	case AES_KEYSIZE_256:
		key_pos -= 2;
		/* fall through */
	case AES_KEYSIZE_192:
		key_pos -= 2;
		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
		       4 * 4);
		break;
	}
	ctx->need_calc_aes_dkey = 0;
}

static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = len;
	ctx->need_calc_aes_dkey = 1;

	memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
	return 0;
}

static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
	int ret;
	void *sbuf;
	int copied = 0;

	while (1) {
		if (!p->sg_src_left) {
			ret = sg_miter_next(&p->src_sg_it);
			BUG_ON(!ret);
			p->sg_src_left = p->src_sg_it.length;
			p->src_start = 0;
		}

		sbuf = p->src_sg_it.addr + p->src_start;

		if (p->sg_src_left <= len - copied) {
			memcpy(dbuf + copied, sbuf, p->sg_src_left);
			copied += p->sg_src_left;
			p->sg_src_left = 0;
			if (copied >= len)
				break;
		} else {
			int copy_len = len - copied;
			memcpy(dbuf + copied, sbuf, copy_len);
			p->src_start += copy_len;
			p->sg_src_left -= copy_len;
			break;
		}
	}
}

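/*
 * Top up the SRAM input buffer: stage as much of the not yet processed
 * request data as fits in SRAM, starting after the p->crypt_len bytes
 * that are already staged (e.g. a buffered partial hash block).
 */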
static void setup_data_in(void)
{
	struct req_progress *p = &cpg->p;
	int data_in_sram =
	    min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
			data_in_sram - p->crypt_len);
	p->crypt_len = data_in_sram;
}

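/*
 * Program one cipher chunk: build the accelerator config descriptor,
 * stage the key, IV and input data in SRAM, then kick the engine.
 * Called once per SRAM-sized chunk of the request; only the first chunk
 * loads the user IV, later chunks appear to continue from the running
 * IV buffer the engine keeps at SRAM_DATA_IV_BUF.
 */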
static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
	default:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
		       AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
		       AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	setup_data_in();
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
	       sizeof(struct sec_accel_config));

	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}

static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	sg_miter_stop(&cpg->p.src_sg_it);
	sg_miter_stop(&cpg->p.dst_sg_it);

	if (req_ctx->op != COP_AES_CBC)
		return;

	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}

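/*
 * Program one hash chunk. The engine hashes SRAM-sized fragments, so
 * each chunk is flagged FIRST/MID/LAST (or NOT_FRAG when a final
 * request fits in a single pass); for anything but a first fragment the
 * digest state registers were seeded beforehand. Totals beyond
 * MAX_HW_HASH_SIZE are never finalized in hardware and are finished by
 * the software fallback instead.
 */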
static void mv_process_hash_current(int first_block)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct req_progress *p = &cpg->p;
	struct sec_accel_config op = { 0 };
	int is_last;

	switch (req_ctx->op) {
	case COP_SHA1:
	default:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
		break;
	case COP_HMAC_SHA1:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
		break;
	}

	op.mac_src_p = MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
		MAC_SRC_TOTAL_LEN((u32)req_ctx->count);

	setup_data_in();

	op.mac_digest = MAC_DIGEST_P(SRAM_DIGEST_BUF) |
		MAC_FRAG_LEN(p->crypt_len);
	op.mac_iv = MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
		MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);

	is_last = req_ctx->last_chunk
		&& (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
		&& (req_ctx->count <= MAX_HW_HASH_SIZE);
	if (req_ctx->first_hash) {
		if (is_last)
			op.config |= CFG_NOT_FRAG;
		else
			op.config |= CFG_FIRST_FRAG;

		req_ctx->first_hash = 0;
	} else {
		if (is_last)
			op.config |= CFG_LAST_FRAG;
		else
			op.config |= CFG_MID_FRAG;
	}

	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));

	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}

static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
					  struct shash_desc *desc)
{
	int i;
	struct sha1_state shash_state;

	shash_state.count = ctx->count + ctx->count_add;
	for (i = 0; i < 5; i++)
		shash_state.state[i] = ctx->state[i];
	memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
	return crypto_shash_import(desc, &shash_state);
}

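/*
 * Finish a hash in software. Used whenever the hardware cannot produce
 * the final digest itself: either the total length exceeds
 * MAX_HW_HASH_SIZE, or the remaining data never went through the engine
 * (short requests buffered in ctx->buffer). Any partial hardware state
 * is imported into the fallback shash first.
 */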
static int mv_hash_final_fallback(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
	} desc;
	int rc;

	desc.shash.tfm = tfm_ctx->fallback;
	desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	if (unlikely(req_ctx->first_hash)) {
		crypto_shash_init(&desc.shash);
		crypto_shash_update(&desc.shash, req_ctx->buffer,
				    req_ctx->extra_bytes);
	} else {
		/* only SHA1 for now... */
		rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
		if (rc)
			goto out;
	}
	rc = crypto_shash_final(&desc.shash, req->result);
out:
	return rc;
}

static void mv_hash_algo_completion(void)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	if (ctx->extra_bytes)
		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
	sg_miter_stop(&cpg->p.src_sg_it);

	ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
	ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
	ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
	ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
	ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);

	if (likely(ctx->last_chunk)) {
		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
			       crypto_ahash_digestsize(crypto_ahash_reqtfm
						       (req)));
		} else
			mv_hash_final_fallback(req);
	}
}

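/*
 * Called from the queue thread once the engine has signalled completion
 * of the current chunk: copy the output back from SRAM into the
 * destination scatterlist (ciphers only), then either feed the next
 * chunk to the engine or complete the request.
 */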
static void dequeue_complete_req(void)
{
	struct crypto_async_request *req = cpg->cur_req;
	void *buf;
	int ret;
	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
	if (cpg->p.copy_back) {
		int need_copy_len = cpg->p.crypt_len;
		int sram_offset = 0;
		do {
			int dst_copy;

			if (!cpg->p.sg_dst_left) {
				ret = sg_miter_next(&cpg->p.dst_sg_it);
				BUG_ON(!ret);
				cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
				cpg->p.dst_start = 0;
			}

			buf = cpg->p.dst_sg_it.addr;
			buf += cpg->p.dst_start;

			dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

			memcpy(buf,
			       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
			       dst_copy);
			sram_offset += dst_copy;
			cpg->p.sg_dst_left -= dst_copy;
			need_copy_len -= dst_copy;
			cpg->p.dst_start += dst_copy;
		} while (need_copy_len > 0);
	}

	cpg->p.crypt_len = 0;

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		cpg->p.process(0);
	} else {
		cpg->p.complete();
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->complete(req, 0);
		local_bh_enable();
	}
}

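/* Count how many scatterlist entries are needed to cover total_bytes. */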
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
	int i = 0;
	size_t cur_len;

	while (1) {
		cur_len = sl[i].length;
		++i;
		if (total_bytes > cur_len)
			total_bytes -= cur_len;
		else
			break;
	}

	return i;
}

static void mv_start_new_crypt_req(struct ablkcipher_request *req)
{
	struct req_progress *p = &cpg->p;
	int num_sgs;

	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	p->hw_nbytes = req->nbytes;
	p->complete = mv_crypto_algo_completion;
	p->process = mv_process_current_q;
	p->copy_back = 1;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	num_sgs = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

	mv_process_current_q(1);
}

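/*
 * Kick off a hash request. Data buffered by a previous update is staged
 * into SRAM first, and a tail that does not fill a whole SHA1 block is
 * held back in ctx->buffer, unless this is the final chunk and the
 * hardware can apply the padding itself. A request that leaves nothing
 * for the hardware is completed inline.
 */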
static void mv_start_new_hash_req(struct ahash_request *req)
{
	struct req_progress *p = &cpg->p;
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	int num_sgs, hw_bytes, old_extra_bytes, rc;
	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	hw_bytes = req->nbytes + ctx->extra_bytes;
	old_extra_bytes = ctx->extra_bytes;

	if (unlikely(ctx->extra_bytes)) {
		memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
		       ctx->extra_bytes);
		p->crypt_len = ctx->extra_bytes;
	}

	memcpy(cpg->sram + SRAM_HMAC_IV_IN, tfm_ctx->ivs, sizeof(tfm_ctx->ivs));

	if (unlikely(!ctx->first_hash)) {
		writel(ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
		writel(ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
		writel(ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
		writel(ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
		writel(ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
	}

	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
	if (ctx->extra_bytes != 0
	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
		hw_bytes -= ctx->extra_bytes;
	else
		ctx->extra_bytes = 0;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	if (hw_bytes) {
		p->hw_nbytes = hw_bytes;
		p->complete = mv_hash_algo_completion;
		p->process = mv_process_hash_current;

		mv_process_hash_current(1);
	} else {
		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
				ctx->extra_bytes - old_extra_bytes);
		sg_miter_stop(&p->src_sg_it);
		if (ctx->last_chunk)
			rc = mv_hash_final_fallback(req);
		else
			rc = 0;
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->base.complete(&req->base, rc);
		local_bh_enable();
	}
}

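/*
 * The dispatcher kthread. Requests are enqueued from process context in
 * mv_handle_req() and dequeued here one at a time, since the engine has
 * a single register/SRAM set. The interrupt handler moves the engine to
 * ENGINE_W_DEQUEUE and wakes this thread, which copies results back and
 * feeds the next chunk or the next request.
 */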
static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct crypto_async_request *async_req = NULL;
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			if (async_req->tfm->__crt_alg->cra_type !=
			    &crypto_ahash_type) {
				struct ablkcipher_request *req =
				    ablkcipher_request_cast(async_req);
				mv_start_new_crypt_req(req);
			} else {
				struct ahash_request *req =
				    ahash_request_cast(async_req);
				mv_start_new_hash_req(req);
			}
			async_req = NULL;
		}

		schedule();

	} while (!kthread_should_stop());
	return 0;
}

static int mv_handle_req(struct crypto_async_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = crypto_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);
	return ret;
}

static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
	return 0;
}

static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
				 int is_last, unsigned int req_len,
				 int count_add)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->op = op;
	ctx->count = req_len;
	ctx->first_hash = 1;
	ctx->last_chunk = is_last;
	ctx->count_add = count_add;
}

static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
				   unsigned req_len)
{
	ctx->last_chunk = is_last;
	ctx->count += req_len;
}

static int mv_hash_init(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
			     tfm_ctx->count_add);
	return 0;
}

static int mv_hash_update(struct ahash_request *req)
{
	if (!req->nbytes)
		return 0;

	mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
	return mv_handle_req(&req->base);
}

static int mv_hash_final(struct ahash_request *req)
{
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
	/* dummy buffer of 4 bytes */
	sg_init_one(&ctx->dummysg, ctx->buffer, 4);
	/* I think I'm allowed to do that... */
	ahash_request_set_crypt(req, &ctx->dummysg, req->result, 0);
	mv_update_hash_req_ctx(ctx, 1, 0);
	return mv_handle_req(&req->base);
}

static int mv_hash_finup(struct ahash_request *req)
{
	mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
	return mv_handle_req(&req->base);
}

static int mv_hash_digest(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
			     req->nbytes, tfm_ctx->count_add);
	return mv_handle_req(&req->base);
}

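/*
 * Store the HMAC inner/outer pad hashes as big-endian IVs: the first
 * five words of ctx->ivs seed the inner hash, the next five the outer
 * hash (loaded at SRAM_HMAC_IV_IN/SRAM_HMAC_IV_OUT respectively).
 */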
static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
			     const void *ostate)
{
	const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
	int i;
	for (i = 0; i < 5; i++) {
		ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
		ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
	}
}

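/*
 * HMAC setkey: precompute the inner and outer pad hashes. The key is
 * padded and XORed exactly as the generic hmac module does it, the
 * partial hashes over ipad and opad are exported, and their states
 * become the IVs the engine starts from for every HMAC request.
 */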
static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	int rc;
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	int bs, ds, ss;

	if (!ctx->base_hash)
		return 0;

	rc = crypto_shash_setkey(ctx->fallback, key, keylen);
	if (rc)
		return rc;

	/* Can't see a way to extract the ipad/opad from the fallback tfm
	   so I'm basically copying code from the hmac module */
	bs = crypto_shash_blocksize(ctx->base_hash);
	ds = crypto_shash_digestsize(ctx->base_hash);
	ss = crypto_shash_statesize(ctx->base_hash);

	{
		struct {
			struct shash_desc shash;
			char ctx[crypto_shash_descsize(ctx->base_hash)];
		} desc;
		unsigned int i;
		char ipad[ss];
		char opad[ss];

		desc.shash.tfm = ctx->base_hash;
		desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
		    CRYPTO_TFM_REQ_MAY_SLEEP;

		if (keylen > bs) {
			int err;

			err = crypto_shash_digest(&desc.shash, key, keylen,
						  ipad);
			if (err)
				return err;

			keylen = ds;
		} else
			memcpy(ipad, key, keylen);

		memset(ipad + keylen, 0, bs - keylen);
		memcpy(opad, ipad, bs);

		for (i = 0; i < bs; i++) {
			ipad[i] ^= 0x36;
			opad[i] ^= 0x5c;
		}

		rc = crypto_shash_init(&desc.shash) ? :
		    crypto_shash_update(&desc.shash, ipad, bs) ? :
		    crypto_shash_export(&desc.shash, ipad) ? :
		    crypto_shash_init(&desc.shash) ? :
		    crypto_shash_update(&desc.shash, opad, bs) ? :
		    crypto_shash_export(&desc.shash, opad);

		if (rc == 0)
			mv_hash_init_ivs(ctx, ipad, opad);

		return rc;
	}
}

static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
			    enum hash_op op, int count_add)
{
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm = NULL;
	struct crypto_shash *base_hash = NULL;
	int err = -ENOMEM;

	ctx->op = op;
	ctx->count_add = count_add;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING MV_CESA
		       "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}
	ctx->fallback = fallback_tfm;

	if (base_hash_name) {
		/* Allocate a hash to compute the ipad/opad of hmac. */
		base_hash = crypto_alloc_shash(base_hash_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(base_hash)) {
			printk(KERN_WARNING MV_CESA
			       "Base driver '%s' could not be loaded!\n",
			       base_hash_name);
			err = PTR_ERR(base_hash);
			goto err_bad_base;
		}
	}
	ctx->base_hash = base_hash;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_req_hash_ctx) +
				 crypto_shash_descsize(ctx->fallback));
	return 0;
err_bad_base:
	crypto_free_shash(fallback_tfm);
out:
	return err;
}

static void mv_cra_hash_exit(struct crypto_tfm *tfm)
{
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
	if (ctx->base_hash)
		crypto_free_shash(ctx->base_hash);
}

static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
}

static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
}

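/*
 * Accelerator interrupt: acknowledge SEC_INT_ACCEL0_DONE and hand the
 * actual post-processing off to the queue thread; nothing beyond the
 * engine state is touched here.
 */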
irqreturn_t crypto_int(int irq, void *priv)
{
	u32 val;

	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
	if (!(val & SEC_INT_ACCEL0_DONE))
		return IRQ_NONE;

	val &= ~SEC_INT_ACCEL0_DONE;
	writel(val, cpg->reg + FPGA_INT_STATUS);
	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
	BUG_ON(cpg->eng_st != ENGINE_BUSY);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
	return IRQ_HANDLED;
}

struct crypto_alg mv_aes_alg_ecb = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "mv-ecb-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = 16,
	.cra_ctxsize = sizeof(struct mv_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_setkey_aes,
			.encrypt = mv_enc_aes_ecb,
			.decrypt = mv_dec_aes_ecb,
		},
	},
};

struct crypto_alg mv_aes_alg_cbc = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "mv-cbc-aes",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cra_init,
	.cra_u = {
		.ablkcipher = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_setkey_aes,
			.encrypt = mv_enc_aes_cbc,
			.decrypt = mv_dec_aes_cbc,
		},
	},
};

struct ahash_alg mv_sha1_alg = {
	.init = mv_hash_init,
	.update = mv_hash_update,
	.final = mv_hash_final,
	.finup = mv_hash_finup,
	.digest = mv_hash_digest,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
			.cra_init = mv_cra_hash_sha1_init,
			.cra_exit = mv_cra_hash_exit,
			.cra_module = THIS_MODULE,
		}
	}
};

struct ahash_alg mv_hmac_sha1_alg = {
	.init = mv_hash_init,
	.update = mv_hash_update,
	.final = mv_hash_final,
	.finup = mv_hash_finup,
	.digest = mv_hash_digest,
	.setkey = mv_hash_setkey,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
			.cra_init = mv_cra_hash_hmac_sha1_init,
			.cra_exit = mv_cra_hash_exit,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR MV_CESA "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, resource_size(res));
	if (!cp->reg) {
		ret = -ENOMEM;
		goto err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res) {
		ret = -ENXIO;
		goto err_unmap_reg;
	}
	cp->sram_size = resource_size(res);
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);
	if (!cp->sram) {
		ret = -ENOMEM;
		goto err_unmap_reg;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		ret = irq;
		goto err_unmap_sram;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_unmap_sram;
	}

	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			  cp);
	if (ret)
		goto err_thread;

	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret) {
		printk(KERN_WARNING MV_CESA
		       "Could not register aes-ecb driver\n");
		goto err_irq;
	}

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret) {
		printk(KERN_WARNING MV_CESA
		       "Could not register aes-cbc driver\n");
		goto err_unreg_ecb;
	}

	ret = crypto_register_ahash(&mv_sha1_alg);
	if (ret == 0)
		cpg->has_sha1 = 1;
	else
		printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");

	ret = crypto_register_ahash(&mv_hmac_sha1_alg);
	if (ret == 0) {
		cpg->has_hmac_sha1 = 1;
	} else {
		printk(KERN_WARNING MV_CESA
		       "Could not register hmac-sha1 driver\n");
	}

	return 0;
err_unreg_ecb:
	crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
	free_irq(irq, cp);
err_thread:
	kthread_stop(cp->queue_th);
err_unmap_sram:
	iounmap(cp->sram);
err_unmap_reg:
	iounmap(cp->reg);
err:
	kfree(cp);
	cpg = NULL;
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int mv_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp = platform_get_drvdata(pdev);

	crypto_unregister_alg(&mv_aes_alg_ecb);
	crypto_unregister_alg(&mv_aes_alg_cbc);
	if (cp->has_sha1)
		crypto_unregister_ahash(&mv_sha1_alg);
	if (cp->has_hmac_sha1)
		crypto_unregister_ahash(&mv_hmac_sha1_alg);
	kthread_stop(cp->queue_th);
	free_irq(cp->irq, cp);
	memset(cp->sram, 0, cp->sram_size);
	iounmap(cp->sram);
	iounmap(cp->reg);
	kfree(cp);
	cpg = NULL;
	return 0;
}

static struct platform_driver marvell_crypto = {
	.probe = mv_probe,
	.remove = mv_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "mv_crypto",
	},
};
MODULE_ALIAS("platform:mv_crypto");

static int __init mv_crypto_init(void)
{
	return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);

static void __exit mv_crypto_exit(void)
{
	platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");