/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "internal.h"
#include "scatterwalk.h"

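/*
 * Walk state flags:
 *
 * PHYS - the caller wants physical page/offset pairs instead of
 *	  mapped virtual addresses.
 * SLOW - less than a full block is contiguously available, so the
 *	  block is bounced through an aligned scratch buffer.
 * COPY - src or dst is misaligned, so the chunk is copied through a
 *	  scratch page and processed in place.
 * DIFF - src and dst were mapped separately and both need unmapping.
 */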
enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr, 0);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr, 1);
}

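/*
 * Return a spot for a len-byte chunk that does not straddle a page
 * boundary: if [start, start + len) would cross into the next page,
 * shift the chunk up so that it starts on that page instead.
 */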
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	if (offset_in_page(start + len) < len)
		return (u8 *)((unsigned long)(start + len) & PAGE_MASK);
	return start;
}

static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
					       struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}

static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	n = walk->nbytes - n;

	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		blkcipher_unmap_src(walk);
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

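/*
 * Called by the cipher after each step of the walk.  A non-negative
 * err is the number of bytes left unprocessed in this step: the walk
 * is advanced past the processed bytes, bounced data is copied back
 * to the real destination, and the next chunk is set up.  Once the
 * walk is complete (or on error), any relocated IV is copied back
 * into desc->info and the scratch memory is freed.
 */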
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int bsize = crypto_blkcipher_blocksize(tfm);
		unsigned int n;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, err);
		else
			n = blkcipher_done_slow(tfm, walk, bsize);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

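/*
 * Slow path: fewer than bsize contiguous bytes are available in the
 * source or destination scatterlist, so one block is assembled in an
 * aligned bounce buffer (allocated on first use).  src and dst both
 * point into the buffer, a block apart; blkcipher_done_slow() copies
 * the in-place result back out.
 */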
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = bsize * 2 + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr + bsize,
						 bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

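/*
 * Copy path: src or dst violates the cipher's alignmask.  Copy the
 * chunk into the preallocated scratch page and let the cipher work on
 * it in place; blkcipher_done_fast() copies the result back to the
 * real destination.
 */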
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

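/*
 * Fast path: hand the scatterlist pages straight to the cipher.  A
 * physical walk records only page/offset pairs; a virtual walk maps
 * the source, and maps the destination separately (BLKCIPHER_WALK_DIFF)
 * unless it aliases the source.
 */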
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

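/*
 * Set up the next chunk of the walk: fail totals shorter than one
 * block, pick the copy path if either scatterlist violates the
 * cipher's alignmask, then clamp the chunk to what is contiguous in
 * both lists and fall back to the slow path if that is less than a
 * block.
 */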
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
	unsigned int bsize = crypto_blkcipher_blocksize(tfm);
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < bsize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

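/*
 * The caller's IV does not satisfy the cipher's alignmask: copy it
 * into a freshly allocated aligned buffer, with enough slack for
 * blkcipher_get_spot() to keep it from straddling a page boundary.
 */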
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
				    struct crypto_blkcipher *tfm,
				    unsigned int alignmask)
{
	unsigned int bs = crypto_blkcipher_blocksize(tfm);
	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
	unsigned int size = bs * 2 + ivsize + max(bs, ivsize) - (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + bs;
	iv = blkcipher_get_spot(iv, bs) + bs;
	iv = blkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

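/*
 * Typical calling sequence for a synchronous cipher implementation
 * (a sketch, not code from this file; encrypt_blocks() is a
 * hypothetical stand-in for the algorithm's own block routine):
 *
 *	struct blkcipher_walk walk;
 *	unsigned int bsize = crypto_blkcipher_blocksize(desc->tfm);
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *
 *	while ((nbytes = walk.nbytes)) {
 *		// hypothetical helper: process the full blocks in place
 *		encrypt_blocks(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 *			       nbytes - (nbytes % bsize), walk.iv);
 *		err = blkcipher_walk_done(desc, &walk, nbytes % bsize);
 *	}
 *	return err;
 */
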
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	walk->iv = desc->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = blkcipher_copy_iv(walk, tfm, alignmask);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

static int setkey(struct crypto_tfm *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

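/*
 * If the user asked for a synchronous tfm (CRYPTO_ALG_ASYNC clear in
 * type and covered by mask), reserve aligned space at the end of the
 * context for the IV that crypto_init_blkcipher_ops_sync() points
 * crt->iv at.
 */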
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	type ^= CRYPTO_ALG_ASYNC;
	mask &= CRYPTO_ALG_ASYNC;
	if ((type & mask) && cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->ivsize = alg->ivsize;

	return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

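/*
 * Pick the ops for the requested interface, using the same
 * flipped-bit test as crypto_blkcipher_ctxsize(): an explicit request
 * for a synchronous tfm gets the blkcipher ops, everything else the
 * ablkcipher wrappers.
 */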
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	type ^= CRYPTO_ALG_ASYNC;
	mask &= CRYPTO_ALG_ASYNC;
	if (type & mask)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");