/*
 * algif_aead: User-space interface for AEAD algorithms
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * This file provides the user-space API for AEAD ciphers.
 *
 * This file is derived from algif_skcipher.c.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

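/*
 * Rough user-space usage sketch, for orientation only: the error handling,
 * key/tag sizes and the "gcm(aes)" algorithm choice below are illustrative
 * assumptions, not part of this file.
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "aead",
 *		.salg_name   = "gcm(aes)"
 *	};
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);
 *	int opfd = accept(tfmfd, NULL, 0);
 *
 * Per request, user space then issues a sendmsg() carrying ALG_SET_OP,
 * ALG_SET_IV and ALG_SET_AEAD_ASSOCLEN control messages together with the
 * input data, and reads the result back via read()/recvmsg().
 */
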
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct aead_sg_list {
	unsigned int cur;
	struct scatterlist sg[ALG_MAX_PAGES];
};

struct aead_ctx {
	struct aead_sg_list tsgl;
	/*
	 * RSGL_MAX_ENTRIES is an artificial limit where user space at maximum
	 * can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
	 * pages
	 */
#define RSGL_MAX_ENTRIES ALG_MAX_PAGES
	struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];

	void *iv;

	struct af_alg_completion completion;

	unsigned long used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	size_t aead_assoclen;
	struct aead_request aead_req;
};

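/*
 * Number of bytes that may still be queued on this socket: sk_sndbuf
 * (rounded down to a page boundary, at least one page) minus the data
 * already held in the tx scatterlist.
 */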
static inline int aead_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

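/* The socket is writable as long as at least one full page still fits. */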
static inline bool aead_writable(struct sock *sk)
{
	return PAGE_SIZE <= aead_sndbuf(sk);
}

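/*
 * Check that enough data has been queued: the transferred data must cover
 * at least the associated data plus one authentication tag before a cipher
 * operation can be started.
 */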
static inline bool aead_sufficient_data(struct aead_ctx *ctx)
{
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));

	return ctx->used >= ctx->aead_assoclen + as;
}

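/*
 * Release all pages held in the tx scatterlist and reset the context so
 * that the next sendmsg() starts a fresh request.
 */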
static void aead_put_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct scatterlist *sg = sgl->sg;
	unsigned int i;

	for (i = 0; i < sgl->cur; i++) {
		if (!sg_page(sg + i))
			continue;

		put_page(sg_page(sg + i));
		sg_assign_page(sg + i, NULL);
	}
	sg_init_table(sg, ALG_MAX_PAGES);
	sgl->cur = 0;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
}

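/* Wake up anyone blocked in poll() or sendmsg() once write space exists. */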
static void aead_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!aead_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

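/*
 * Sleep until the sender has marked the end of the input data, i.e. until
 * ctx->more is cleared, honoring MSG_DONTWAIT and pending signals.
 */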
static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, !ctx->more)) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

	return err;
}

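/*
 * Wake up anyone waiting to process the queued data once a complete
 * request (no more input expected) is present.
 */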
static void aead_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (ctx->more)
		return;
	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

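/*
 * Copy user data into the tx scatterlist. Optional control messages select
 * encryption/decryption, the IV and the associated data length; MSG_MORE
 * indicates that further data for this request will follow.
 */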
static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned ivsize =
		crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err = -EINVAL;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);

		ctx->aead_assoclen = con.aead_assoclen;
	}

	while (size) {
		size_t len = size;
		struct scatterlist *sg = NULL;

		/* use the existing memory in an allocated page */
		if (ctx->merge) {
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);
			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!aead_writable(sk)) {
			/* user space sent too much data */
			aead_put_sgl(sk);
			err = -EMSGSIZE;
			goto unlock;
		}

		/* allocate a new page */
		len = min_t(unsigned long, size, aead_sndbuf(sk));
		while (len) {
			size_t plen = 0;

			if (sgl->cur >= ALG_MAX_PAGES) {
				aead_put_sgl(sk);
				err = -E2BIG;
				goto unlock;
			}

			sg = sgl->sg + sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg));
				sg_assign_page(sg, NULL);
				goto unlock;
			}

			sg->offset = 0;
			sg->length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			sgl->cur++;
			size -= plen;
			ctx->merge = plen & (PAGE_SIZE - 1);
		}
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: copied;
}

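/*
 * Zero-copy variant of aead_sendmsg(): take a reference on the given page
 * and link it into the tx scatterlist instead of copying the data.
 */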
static ssize_t aead_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_sg_list *sgl = &ctx->tsgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (sgl->cur >= ALG_MAX_PAGES)
		return -E2BIG;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!aead_writable(sk)) {
		/* user space sent too much data */
		aead_put_sgl(sk);
		err = -EMSGSIZE;
		goto unlock;
	}

	ctx->merge = 0;

	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

	err = 0;

done:
	ctx->more = flags & MSG_MORE;
	if (!ctx->more && !aead_sufficient_data(ctx)) {
		aead_put_sgl(sk);
		err = -EMSGSIZE;
	}

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

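/*
 * Perform the actual cipher operation: map the caller's iovecs as the
 * output scatterlist, run the AEAD encryption/decryption over the queued
 * input data and return the number of bytes produced.
 */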
static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
	struct aead_sg_list *sgl = &ctx->tsgl;
	unsigned int i = 0;
	int err = -EINVAL;
	unsigned long used = 0;
	size_t outlen = 0;
	size_t usedpages = 0;
	unsigned int cnt = 0;

	/* Limit number of IOV blocks to be accessed below */
	if (msg->msg_iter.nr_segs > RSGL_MAX_ENTRIES)
		return -ENOMSG;

	lock_sock(sk);

	/*
	 * AEAD memory structure: For encryption, the tag is appended to the
	 * ciphertext, which implies that the memory allocated for the
	 * ciphertext must be increased by the tag length. For decryption, the
	 * tag is expected to be concatenated to the ciphertext. The plaintext
	 * therefore has a memory size of the ciphertext minus the tag length.
	 *
	 * The memory layout for a cipher operation is as follows:
	 * AEAD encryption input:  assoc data || plaintext
	 * AEAD encryption output: ciphertext || auth tag
	 * AEAD decryption input:  assoc data || ciphertext || auth tag
	 * AEAD decryption output: plaintext
	 */

	if (ctx->more) {
		err = aead_wait_for_data(sk, flags);
		if (err)
			goto unlock;
	}

	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- note that the same check
	 * is also present in sendmsg/sendpage. The checks there inform the
	 * data sender that something is wrong, but they are irrelevant for
	 * maintaining kernel integrity. We need this check here too in case
	 * user space decides to not honor the error from sendmsg/sendpage
	 * and still calls recvmsg. This check here protects the kernel
	 * integrity.
	 */
	if (!aead_sufficient_data(ctx))
		goto unlock;

	outlen = used;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen + (ctx->enc ? as : 0);

	/* convert iovecs of output buffers into scatterlists */
	while (iov_iter_count(&msg->msg_iter)) {
		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
				      (outlen - usedpages));

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&ctx->rsgl[cnt], &msg->msg_iter,
				     seglen);
		if (err < 0)
			goto unlock;
		usedpages += err;
		/* chain the new scatterlist with previous one */
		if (cnt)
			af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]);

		/* we do not need more iovecs as we have sufficient memory */
		if (outlen <= usedpages)
			break;
		iov_iter_advance(&msg->msg_iter, err);
		cnt++;
	}

	err = -EINVAL;
	/* ensure output buffer is sufficiently large */
	if (usedpages < outlen)
		goto unlock;

	sg_mark_end(sgl->sg + sgl->cur - 1);

	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->rsgl[0].sg,
			       used, ctx->iv);
	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);

	err = af_alg_wait_for_completion(ctx->enc ?
					 crypto_aead_encrypt(&ctx->aead_req) :
					 crypto_aead_decrypt(&ctx->aead_req),
					 &ctx->completion);

	if (err) {
		/* EBADMSG implies a valid cipher operation took place */
		if (err == -EBADMSG)
			aead_put_sgl(sk);
		goto unlock;
	}

	aead_put_sgl(sk);

	err = 0;

unlock:
	for (i = 0; i < cnt; i++)
		af_alg_free_sg(&ctx->rsgl[i]);

	aead_wmem_wakeup(sk);
	release_sock(sk);

	return err ? err : outlen;
}

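/*
 * Poll support: the socket is readable once a complete request is queued
 * and writable while send buffer space remains.
 */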
static unsigned int aead_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (!ctx->more)
		mask |= POLLIN | POLLRDNORM;

	if (aead_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.sendpage	=	aead_sendpage,
	.recvmsg	=	aead_recvmsg,
	.poll		=	aead_poll,
};

static void *aead_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_aead(name, type, mask);
}

static void aead_release(void *private)
{
	crypto_free_aead(private);
}

static int aead_setauthsize(void *private, unsigned int authsize)
{
	return crypto_aead_setauthsize(private, authsize);
}

static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_aead_setkey(private, key, keylen);
}

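/* Free all per-socket state; the IV buffer is zeroized before release. */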
static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	unsigned int ivlen = crypto_aead_ivsize(
				crypto_aead_reqtfm(&ctx->aead_req));

	aead_put_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

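/*
 * Set up the per-socket request context on accept(): the aead_request is
 * allocated together with the context, and the IV buffer is sized for the
 * bound transform.
 */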
static int aead_accept_parent(void *private, struct sock *sk)
{
	struct aead_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
	unsigned int ivlen = crypto_aead_ivsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	memset(ctx, 0, len);

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}
	memset(ctx->iv, 0, ivlen);

	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	ctx->tsgl.cur = 0;
	ctx->aead_assoclen = 0;
	af_alg_init_completion(&ctx->completion);
	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);

	ask->private = ctx;

	aead_request_set_tfm(&ctx->aead_req, private);
	aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  af_alg_complete, &ctx->completion);

	sk->sk_destruct = aead_sock_destruct;

	return 0;
}

static const struct af_alg_type algif_type_aead = {
	.bind		=	aead_bind,
	.release	=	aead_release,
	.setkey		=	aead_setkey,
	.setauthsize	=	aead_setauthsize,
	.accept		=	aead_accept_parent,
	.ops		=	&algif_aead_ops,
	.name		=	"aead",
	.owner		=	THIS_MODULE
};

static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}

static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);
	BUG_ON(err);
}

module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");