GNU Linux-libre 4.9.337-gnu1
[releases.git] / crypto / ablkcipher.c
/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

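/*
 * A minimal usage sketch, for illustration only: my_done, my_ctx, src_sg,
 * dst_sg and friends are hypothetical names and error handling is omitted.
 * Completion of an asynchronous request is signalled through the callback
 * installed on the request:
 *
 *	static void my_done(struct crypto_async_request *req, int err)
 *	{
 *		... record err and wake up the submitter ...
 *	}
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, keylen);
 *
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					my_done, my_ctx);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_ablkcipher_encrypt(req);
 *
 * A return value of -EINPROGRESS (or -EBUSY with the backlog flag) means the
 * request was queued and my_done() will report the final status; the request
 * and the tfm are released with ablkcipher_request_free() and
 * crypto_free_ablkcipher() once the operation has finished.
 */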
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

struct ablkcipher_buffer {
        struct list_head        entry;
        struct scatter_walk     dst;
        unsigned int            len;
        void                    *data;
};

enum {
        ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
        scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
        struct ablkcipher_buffer *p, *tmp;

        list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
                ablkcipher_buffer_write(p);
                list_del(&p->entry);
                kfree(p);
        }
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
                                          struct ablkcipher_buffer *p)
{
        p->dst = walk->out;
        list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
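/*
 * Worked example (assuming 4 KiB pages, purely illustrative): if start is at
 * offset 0xff8 within a page and len is 16, start + len - 1 falls on the next
 * page, so end_page is that page's base address and is returned; the 16-byte
 * spot then lies entirely within a single page.  If start + len - 1 stays on
 * start's page, end_page <= start and start itself is returned.
 */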
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

        return max(start, end_page);
}

static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
                                        unsigned int n)
{
        for (;;) {
                unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

                if (len_this_page > n)
                        len_this_page = n;
                scatterwalk_advance(&walk->out, n);
                if (n == len_this_page)
                        break;
                n -= len_this_page;
                scatterwalk_start(&walk->out, sg_next(walk->out.sg));
        }
}

static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
                                        unsigned int n)
{
        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk);

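/*
 * Finish the current step of a walk.  On entry err is either a negative
 * errno or the number of bytes of this step that were left unprocessed
 * (0 means the whole step was handled; a slow-path step must always be
 * consumed in full).  The walk is advanced and, while data remains, the
 * next step is set up; on the final step or on error the bounced IV, if
 * one was allocated, is copied back to req->info and freed.
 */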
int ablkcipher_walk_done(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk, int err)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int n; /* bytes processed */
        bool more;

        if (unlikely(err < 0))
                goto finish;

        n = walk->nbytes - err;
        walk->total -= n;
        more = (walk->total != 0);

        if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
                ablkcipher_done_fast(walk, n);
        } else {
                if (WARN_ON(err)) {
                        /* unexpected case; didn't process all bytes */
                        err = -EINVAL;
                        goto finish;
                }
                ablkcipher_done_slow(walk, n);
        }

        scatterwalk_done(&walk->in, 0, more);
        scatterwalk_done(&walk->out, 1, more);

        if (more) {
                crypto_yield(req->base.flags);
                return ablkcipher_walk_next(req, walk);
        }
        err = 0;
finish:
        walk->nbytes = 0;
        if (walk->iv != req->info)
                memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
        kfree(walk->iv_buffer);
        return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

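/*
 * Slow path: the next block is misaligned or spans a page or scatterlist
 * entry boundary.  Copy it into a freshly allocated, suitably aligned
 * bounce buffer, hand that buffer back as both source and destination
 * (src == dst), and queue it so that __ablkcipher_walk_complete() later
 * copies the result out to the destination scatterlist.
 */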
static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk,
                                       unsigned int bsize,
                                       unsigned int alignmask,
                                       void **src_p, void **dst_p)
{
        unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
        struct ablkcipher_buffer *p;
        void *src, *dst, *base;
        unsigned int n;

        n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
        n += (aligned_bsize * 3 - (alignmask + 1) +
              (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

        p = kmalloc(n, GFP_ATOMIC);
        if (!p)
                return ablkcipher_walk_done(req, walk, -ENOMEM);

        base = p + 1;

        dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
        src = dst = ablkcipher_get_spot(dst, bsize);

        p->len = bsize;
        p->data = dst;

        scatterwalk_copychunks(src, &walk->in, bsize, 0);

        ablkcipher_queue_write(walk, p);

        walk->nbytes = bsize;
        walk->flags |= ABLKCIPHER_WALK_SLOW;

        *src_p = src;
        *dst_p = dst;

        return 0;
}

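/*
 * The caller's IV does not satisfy the algorithm's alignment mask: copy
 * it into a freshly allocated buffer at a suitably aligned offset.
 * walk->iv is redirected to the copy; walk->iv_buffer is freed again in
 * ablkcipher_walk_done().
 */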
static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
                                     struct crypto_tfm *tfm,
                                     unsigned int alignmask)
{
        unsigned bs = walk->blocksize;
        unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
        unsigned aligned_bs = ALIGN(bs, alignmask + 1);
        unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
                            (alignmask + 1);
        u8 *iv;

        size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
        if (!walk->iv_buffer)
                return -ENOMEM;

        iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
        iv = ablkcipher_get_spot(iv, ivsize);

        walk->iv = memcpy(iv, walk->iv, ivsize);
        return 0;
}

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
                                       struct ablkcipher_walk *walk)
{
        walk->src.page = scatterwalk_page(&walk->in);
        walk->src.offset = offset_in_page(walk->in.offset);
        walk->dst.page = scatterwalk_page(&walk->out);
        walk->dst.offset = offset_in_page(walk->out.offset);

        return 0;
}

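/*
 * Set up the next step of the walk: clamp the step to what is contiguous
 * in both source and destination, take the fast path (direct page/offset
 * references) when at least one full block is available and both sides
 * satisfy the alignment mask, and otherwise fall back to the slow
 * bounce-buffer path for a single block.
 */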
static int ablkcipher_walk_next(struct ablkcipher_request *req,
                                struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask, bsize, n;
        void *src, *dst;
        int err;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        n = walk->total;
        if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
                req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return ablkcipher_walk_done(req, walk, -EINVAL);
        }

        walk->flags &= ~ABLKCIPHER_WALK_SLOW;
        src = dst = NULL;

        bsize = min(walk->blocksize, n);
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (n < bsize ||
            !scatterwalk_aligned(&walk->in, alignmask) ||
            !scatterwalk_aligned(&walk->out, alignmask)) {
                err = ablkcipher_next_slow(req, walk, bsize, alignmask,
                                           &src, &dst);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;

        return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
        if (err >= 0) {
                walk->src.page = virt_to_page(src);
                walk->dst.page = virt_to_page(dst);
                walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
                walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
        }

        return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
                                 struct ablkcipher_walk *walk)
{
        struct crypto_tfm *tfm = req->base.tfm;
        unsigned int alignmask;

        alignmask = crypto_tfm_alg_alignmask(tfm);
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->iv = req->info;
        walk->nbytes = walk->total;
        if (unlikely(!walk->total))
                return 0;

        walk->iv_buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & alignmask))) {
                int err = ablkcipher_copy_iv(walk, tfm, alignmask);

                if (err)
                        return err;
        }

        scatterwalk_start(&walk->in, walk->in.sg);
        scatterwalk_start(&walk->out, walk->out.sg);

        return ablkcipher_walk_next(req, walk);
}

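/*
 * Sketch of the driver-side loop around this walk interface, for
 * illustration only (error handling omitted; ablkcipher_walk_init() is
 * assumed to be the initialisation helper declared next to these
 * prototypes in <crypto/algapi.h>).  The third argument to
 * ablkcipher_walk_done() is the number of bytes of the step left
 * unprocessed, so 0 means the whole step was handled:
 *
 *	ablkcipher_walk_init(&walk, req->dst, req->src, req->nbytes);
 *	err = ablkcipher_walk_phys(req, &walk);
 *	while (walk.nbytes) {
 *		... process walk.src.page/offset into walk.dst.page/offset ...
 *		err = ablkcipher_walk_done(req, &walk, 0);
 *	}
 *	__ablkcipher_walk_complete(&walk);
 */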
int ablkcipher_walk_phys(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk)
{
        walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
        return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

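/*
 * The caller's key pointer does not satisfy the algorithm's alignment
 * mask: bounce the key through a temporary aligned allocation, set the
 * key from there, then wipe the copy before freeing it.
 */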
static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        memset(alignbuffer, 0, keylen);
        kfree(buffer);
        return ret;
}

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                  unsigned int keylen)
{
        struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
        unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
                                              u32 mask)
{
        return alg->cra_ctxsize;
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
                                      u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;

        strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
                sizeof(rblkcipher.geniv));
        rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
        rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                    sizeof(struct crypto_report_blkcipher), &rblkcipher))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : ablkcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_ablkcipher_show,
#endif
        .report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);

static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
                                      u32 mask)
{
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
                      alg->setkey : setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

#ifdef CONFIG_NET
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;

        strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
                sizeof(rblkcipher.geniv));
        rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
        rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                    sizeof(struct crypto_report_blkcipher), &rblkcipher))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

        seq_printf(m, "type         : givcipher\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
        seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
        .ctxsize = crypto_ablkcipher_ctxsize,
        .init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_givcipher_show,
#endif
        .report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);