GNU Linux-libre 4.14.290-gnu1
crypto/skcipher.c
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

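/*
 * Internal state flags for a skcipher walk.  PHYS selects the
 * physical-address (async) walk; SLOW, COPY and DIFF record how the
 * current chunk is being presented to the cipher (bounce buffer,
 * aligned copy page, or separately mapped src/dst); SLEEP notes that
 * the request allows sleeping allocations.
 */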
enum {
        SKCIPHER_WALK_PHYS = 1 << 0,
        SKCIPHER_WALK_SLOW = 1 << 1,
        SKCIPHER_WALK_COPY = 1 << 2,
        SKCIPHER_WALK_DIFF = 1 << 3,
        SKCIPHER_WALK_SLEEP = 1 << 4,
};

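/*
 * A deferred write queued by the physical (async) walk: once the
 * cipher has run, skcipher_walk_complete() copies @len bytes from
 * @data (or from the inline @buffer if @data is NULL) back out to the
 * destination scatterlist position saved in @dst.
 */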
struct skcipher_walk_buffer {
        struct list_head entry;
        struct scatter_walk dst;
        unsigned int len;
        u8 *data;
        u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
        if (PageHighMem(scatterwalk_page(walk)))
                kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
        struct page *page = scatterwalk_page(walk);

        return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
               offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
        walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
        walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
        skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
        skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
        return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
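/*
 * For example, with 4 KiB pages: if start == 0x...0ff8 and len == 16,
 * the last byte would land in the following page, so end_page ==
 * 0x...1000 and that next page boundary is returned.  If the range
 * already fits within one page, end_page <= start and start itself is
 * returned.
 */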
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

        return max(start, end_page);
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
        u8 *addr;

        addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
        addr = skcipher_get_spot(addr, bsize);
        scatterwalk_copychunks(addr, &walk->out, bsize,
                               (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
        return 0;
}

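/*
 * Finish the current step of a walk.  @err is the number of bytes the
 * cipher left unprocessed (or a negative errno).  Any bounce or copy
 * buffer is flushed back to the destination, both scatterlist cursors
 * advance, and the walk either moves on to the next chunk or frees
 * its temporary state and returns.
 */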
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
        unsigned int n = walk->nbytes;
        unsigned int nbytes = 0;

        if (!n)
                goto finish;

        if (likely(err >= 0)) {
                n -= err;
                nbytes = walk->total - n;
        }

        if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
                                    SKCIPHER_WALK_SLOW |
                                    SKCIPHER_WALK_COPY |
                                    SKCIPHER_WALK_DIFF)))) {
unmap_src:
                skcipher_unmap_src(walk);
        } else if (walk->flags & SKCIPHER_WALK_DIFF) {
                skcipher_unmap_dst(walk);
                goto unmap_src;
        } else if (walk->flags & SKCIPHER_WALK_COPY) {
                skcipher_map_dst(walk);
                memcpy(walk->dst.virt.addr, walk->page, n);
                skcipher_unmap_dst(walk);
        } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
                if (err > 0) {
                        /*
                         * Didn't process all bytes.  Either the algorithm is
                         * broken, or this was the last step and it turned out
                         * the message wasn't evenly divisible into blocks but
                         * the algorithm requires it.
                         */
                        err = -EINVAL;
                        nbytes = 0;
                } else
                        n = skcipher_done_slow(walk, n);
        }

        if (err > 0)
                err = 0;

        walk->total = nbytes;
        walk->nbytes = 0;

        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);
        scatterwalk_done(&walk->in, 0, nbytes);
        scatterwalk_done(&walk->out, 1, nbytes);

        if (nbytes) {
                crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
                             CRYPTO_TFM_REQ_MAY_SLEEP : 0);
                return skcipher_walk_next(walk);
        }

finish:
        /* Short-circuit for the common/fast path. */
        if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
                goto out;

        if (walk->flags & SKCIPHER_WALK_PHYS)
                goto out;

        if (walk->iv != walk->oiv)
                memcpy(walk->oiv, walk->iv, walk->ivsize);
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);

out:
        return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
        struct skcipher_walk_buffer *p, *tmp;

        list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
                u8 *data;

                if (err)
                        goto done;

                data = p->data;
                if (!data) {
                        data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
                        data = skcipher_get_spot(data, walk->stride);
                }

                scatterwalk_copychunks(data, &p->dst, p->len, 1);

                if (offset_in_page(p->data) + p->len + walk->stride >
                    PAGE_SIZE)
                        free_page((unsigned long)p->data);

done:
                list_del(&p->entry);
                kfree(p);
        }

        if (!err && walk->iv != walk->oiv)
                memcpy(walk->oiv, walk->iv, walk->ivsize);
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
                                 struct skcipher_walk_buffer *p)
{
        p->dst = walk->out;
        list_add_tail(&p->entry, &walk->buffers);
}

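/*
 * Slow path: the chunk straddles a scatterlist element or page, so it
 * is bounced through a buffer that is aligned to the alignmask and
 * does not itself cross a page boundary.  The size computed below
 * reserves room for the struct header (physical walks only), the
 * alignment slack, and the worst-case page-straddle adjustment.
 */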
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
        bool phys = walk->flags & SKCIPHER_WALK_PHYS;
        unsigned alignmask = walk->alignmask;
        struct skcipher_walk_buffer *p;
        unsigned a;
        unsigned n;
        u8 *buffer;
        void *v;

        if (!phys) {
                if (!walk->buffer)
                        walk->buffer = walk->page;
                buffer = walk->buffer;
                if (buffer)
                        goto ok;
        }

        /* Start with the minimum alignment of kmalloc. */
        a = crypto_tfm_ctx_alignment() - 1;
        n = bsize;

        if (phys) {
                /* Calculate the minimum alignment of p->buffer. */
                a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
                n += sizeof(*p);
        }

        /* Minimum size to align p->buffer by alignmask. */
        n += alignmask & ~a;

        /* Minimum size to ensure p->buffer does not straddle a page. */
        n += (bsize - 1) & ~(alignmask | a);

        v = kzalloc(n, skcipher_walk_gfp(walk));
        if (!v)
                return skcipher_walk_done(walk, -ENOMEM);

        if (phys) {
                p = v;
                p->len = bsize;
                skcipher_queue_write(walk, p);
                buffer = p->buffer;
        } else {
                walk->buffer = v;
                buffer = v;
        }

ok:
        walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
        walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
        walk->src.virt.addr = walk->dst.virt.addr;

        scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

        walk->nbytes = bsize;
        walk->flags |= SKCIPHER_WALK_SLOW;

        return 0;
}

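/*
 * Copy path: src/dst are contiguous but misaligned, so the input is
 * copied into the preallocated walk->page and processed in place
 * there.  Virtual walks write the result back in skcipher_walk_done();
 * physical walks queue a deferred write for skcipher_walk_complete().
 */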
static int skcipher_next_copy(struct skcipher_walk *walk)
{
        struct skcipher_walk_buffer *p;
        u8 *tmp = walk->page;

        skcipher_map_src(walk);
        memcpy(tmp, walk->src.virt.addr, walk->nbytes);
        skcipher_unmap_src(walk);

        walk->src.virt.addr = tmp;
        walk->dst.virt.addr = tmp;

        if (!(walk->flags & SKCIPHER_WALK_PHYS))
                return 0;

        p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
        if (!p)
                return -ENOMEM;

        p->data = walk->page;
        p->len = walk->nbytes;
        skcipher_queue_write(walk, p);

        if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
            PAGE_SIZE)
                walk->page = NULL;
        else
                walk->page += walk->nbytes;

        return 0;
}

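/*
 * Fast path: both scatterlists are suitably aligned, so the cipher
 * can work directly on the mapped pages.  If src and dst differ, the
 * destination gets its own mapping and SKCIPHER_WALK_DIFF is set so
 * that skcipher_walk_done() unmaps both.
 */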
static int skcipher_next_fast(struct skcipher_walk *walk)
{
        unsigned long diff;

        walk->src.phys.page = scatterwalk_page(&walk->in);
        walk->src.phys.offset = offset_in_page(walk->in.offset);
        walk->dst.phys.page = scatterwalk_page(&walk->out);
        walk->dst.phys.offset = offset_in_page(walk->out.offset);

        if (walk->flags & SKCIPHER_WALK_PHYS)
                return 0;

        diff = walk->src.phys.offset - walk->dst.phys.offset;
        diff |= walk->src.virt.page - walk->dst.virt.page;

        skcipher_map_src(walk);
        walk->dst.virt.addr = walk->src.virt.addr;

        if (diff) {
                walk->flags |= SKCIPHER_WALK_DIFF;
                skcipher_map_dst(walk);
        }

        return 0;
}

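/*
 * Set up the next chunk of the walk, picking the slow, copy or fast
 * path based on how many contiguous bytes the scatterlists can
 * provide and on the cipher's alignment requirements.
 */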
static int skcipher_walk_next(struct skcipher_walk *walk)
{
        unsigned int bsize;
        unsigned int n;
        int err;

        walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
                         SKCIPHER_WALK_DIFF);

        n = walk->total;
        bsize = min(walk->stride, max(n, walk->blocksize));
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (unlikely(n < bsize)) {
                if (unlikely(walk->total < walk->blocksize))
                        return skcipher_walk_done(walk, -EINVAL);

slow_path:
                err = skcipher_next_slow(walk, bsize);
                goto set_phys_lowmem;
        }

        if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
                if (!walk->page) {
                        gfp_t gfp = skcipher_walk_gfp(walk);

                        walk->page = (void *)__get_free_page(gfp);
                        if (!walk->page)
                                goto slow_path;
                }

                walk->nbytes = min_t(unsigned, n,
                                     PAGE_SIZE - offset_in_page(walk->page));
                walk->flags |= SKCIPHER_WALK_COPY;
                err = skcipher_next_copy(walk);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;

        return skcipher_next_fast(walk);

set_phys_lowmem:
        if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
                walk->src.phys.page = virt_to_page(walk->src.virt.addr);
                walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
                walk->src.phys.offset &= PAGE_SIZE - 1;
                walk->dst.phys.offset &= PAGE_SIZE - 1;
        }
        return err;
}

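/*
 * The caller's IV is not sufficiently aligned, so duplicate it into a
 * freshly allocated buffer that satisfies the alignmask (and, for
 * virtual walks, leaves a block-sized scratch area in front that the
 * slow path can reuse as its bounce buffer).
 */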
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
        unsigned a = crypto_tfm_ctx_alignment() - 1;
        unsigned alignmask = walk->alignmask;
        unsigned ivsize = walk->ivsize;
        unsigned bs = walk->stride;
        unsigned aligned_bs;
        unsigned size;
        u8 *iv;

        aligned_bs = ALIGN(bs, alignmask + 1);

        /* Minimum size to align buffer by alignmask. */
        size = alignmask & ~a;

        if (walk->flags & SKCIPHER_WALK_PHYS)
                size += ivsize;
        else {
                size += aligned_bs + ivsize;

                /* Minimum size to ensure buffer does not straddle a page. */
                size += (bs - 1) & ~(alignmask | a);
        }

        walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
        if (!walk->buffer)
                return -ENOMEM;

        iv = PTR_ALIGN(walk->buffer, alignmask + 1);
        iv = skcipher_get_spot(iv, bs) + aligned_bs;

        walk->iv = memcpy(iv, walk->iv, walk->ivsize);
        return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
                int err = skcipher_copy_iv(walk);
                if (err)
                        return err;
        }

        walk->page = NULL;
        walk->nbytes = walk->total;

        return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
                                  struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

        walk->total = req->cryptlen;
        walk->nbytes = 0;
        walk->iv = req->iv;
        walk->oiv = req->iv;

        if (unlikely(!walk->total))
                return 0;

        scatterwalk_start(&walk->in, req->src);
        scatterwalk_start(&walk->out, req->dst);

        walk->flags &= ~SKCIPHER_WALK_SLEEP;
        walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                       SKCIPHER_WALK_SLEEP : 0;

        walk->blocksize = crypto_skcipher_blocksize(tfm);
        walk->stride = crypto_skcipher_walksize(tfm);
        walk->ivsize = crypto_skcipher_ivsize(tfm);
        walk->alignmask = crypto_skcipher_alignmask(tfm);

        return skcipher_walk_first(walk);
}

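/*
 * Walk entry point for implementations that process data through
 * virtual addresses.  A minimal usage sketch; crypt_blocks() and
 * bsize stand in for a driver's own block routine and block size:
 *
 *	struct skcipher_walk walk;
 *	int err = skcipher_walk_virt(&walk, req, false);
 *
 *	while (walk.nbytes) {
 *		unsigned int n = walk.nbytes - (walk.nbytes % bsize);
 *
 *		crypt_blocks(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 *			     n, walk.iv);
 *		err = skcipher_walk_done(&walk, walk.nbytes - n);
 *	}
 *	return err;
 */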
int skcipher_walk_virt(struct skcipher_walk *walk,
                       struct skcipher_request *req, bool atomic)
{
        int err;

        walk->flags &= ~SKCIPHER_WALK_PHYS;

        err = skcipher_walk_skcipher(walk, req);

        walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

        return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

void skcipher_walk_atomise(struct skcipher_walk *walk)
{
        walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
                        struct skcipher_request *req)
{
        walk->flags |= SKCIPHER_WALK_PHYS;

        INIT_LIST_HEAD(&walk->buffers);

        return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

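/*
 * AEAD variant of the walk setup: the associated data is skipped in
 * both scatterlists before walking, so the cipher only ever sees the
 * crypttext region.
 */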
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
                                     struct aead_request *req, bool atomic)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int err;

        walk->nbytes = 0;
        walk->iv = req->iv;
        walk->oiv = req->iv;

        if (unlikely(!walk->total))
                return 0;

        walk->flags &= ~SKCIPHER_WALK_PHYS;

        scatterwalk_start(&walk->in, req->src);
        scatterwalk_start(&walk->out, req->dst);

        scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
        scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

        scatterwalk_done(&walk->in, 0, walk->total);
        scatterwalk_done(&walk->out, 0, walk->total);

        if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
                walk->flags |= SKCIPHER_WALK_SLEEP;
        else
                walk->flags &= ~SKCIPHER_WALK_SLEEP;

        walk->blocksize = crypto_aead_blocksize(tfm);
        walk->stride = crypto_aead_chunksize(tfm);
        walk->ivsize = crypto_aead_ivsize(tfm);
        walk->alignmask = crypto_aead_alignmask(tfm);

        err = skcipher_walk_first(walk);

        if (atomic)
                walk->flags &= ~SKCIPHER_WALK_SLEEP;

        return err;
}

int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
                       bool atomic)
{
        walk->total = req->cryptlen;

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
                               struct aead_request *req, bool atomic)
{
        walk->total = req->cryptlen;

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
                               struct aead_request *req, bool atomic)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);

        walk->total = req->cryptlen - crypto_aead_authsize(tfm);

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

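/*
 * Everything below bridges the skcipher API onto algorithms that were
 * registered through the legacy blkcipher/ablkcipher types, and
 * implements the skcipher crypto_type itself.
 */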
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type == &crypto_blkcipher_type)
                return sizeof(struct crypto_blkcipher *);

        if (alg->cra_type == &crypto_ablkcipher_type ||
            alg->cra_type == &crypto_givcipher_type)
                return sizeof(struct crypto_ablkcipher *);

        return crypto_alg_extsize(alg);
}

static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
                                     const u8 *key, unsigned int keylen)
{
        struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct crypto_blkcipher *blkcipher = *ctx;
        int err;

        crypto_blkcipher_clear_flags(blkcipher, ~0);
        crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
                                              CRYPTO_TFM_REQ_MASK);
        err = crypto_blkcipher_setkey(blkcipher, key, keylen);
        crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
                                       CRYPTO_TFM_RES_MASK);

        return err;
}

static int skcipher_crypt_blkcipher(struct skcipher_request *req,
                                    int (*crypt)(struct blkcipher_desc *,
                                                 struct scatterlist *,
                                                 struct scatterlist *,
                                                 unsigned int))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct blkcipher_desc desc = {
                .tfm = *ctx,
                .info = req->iv,
                .flags = req->base.flags,
        };

        return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
        struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
        struct crypto_alg *calg = tfm->__crt_alg;
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
        struct crypto_blkcipher *blkcipher;
        struct crypto_tfm *btfm;

        if (!crypto_mod_get(calg))
                return -EAGAIN;

        btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
                                  CRYPTO_ALG_TYPE_MASK);
        if (IS_ERR(btfm)) {
                crypto_mod_put(calg);
                return PTR_ERR(btfm);
        }

        blkcipher = __crypto_blkcipher_cast(btfm);
        *ctx = blkcipher;
        tfm->exit = crypto_exit_skcipher_ops_blkcipher;

        skcipher->setkey = skcipher_setkey_blkcipher;
        skcipher->encrypt = skcipher_encrypt_blkcipher;
        skcipher->decrypt = skcipher_decrypt_blkcipher;

        skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
        skcipher->keysize = calg->cra_blkcipher.max_keysize;

        return 0;
}

static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
                                      const u8 *key, unsigned int keylen)
{
        struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct crypto_ablkcipher *ablkcipher = *ctx;
        int err;

        crypto_ablkcipher_clear_flags(ablkcipher, ~0);
        crypto_ablkcipher_set_flags(ablkcipher,
                                    crypto_skcipher_get_flags(tfm) &
                                    CRYPTO_TFM_REQ_MASK);
        err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
        crypto_skcipher_set_flags(tfm,
                                  crypto_ablkcipher_get_flags(ablkcipher) &
                                  CRYPTO_TFM_RES_MASK);

        return err;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
                                     int (*crypt)(struct ablkcipher_request *))
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
        struct ablkcipher_request *subreq = skcipher_request_ctx(req);

        ablkcipher_request_set_tfm(subreq, *ctx);
        ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
                                        req->base.complete, req->base.data);
        ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                     req->iv);

        return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

        return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

        return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
        struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

        crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
        struct crypto_alg *calg = tfm->__crt_alg;
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
        struct crypto_ablkcipher *ablkcipher;
        struct crypto_tfm *abtfm;

        if (!crypto_mod_get(calg))
                return -EAGAIN;

        abtfm = __crypto_alloc_tfm(calg, 0, 0);
        if (IS_ERR(abtfm)) {
                crypto_mod_put(calg);
                return PTR_ERR(abtfm);
        }

        ablkcipher = __crypto_ablkcipher_cast(abtfm);
        *ctx = ablkcipher;
        tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

        skcipher->setkey = skcipher_setkey_ablkcipher;
        skcipher->encrypt = skcipher_encrypt_ablkcipher;
        skcipher->decrypt = skcipher_decrypt_ablkcipher;

        skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
        skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
                            sizeof(struct ablkcipher_request);
        skcipher->keysize = calg->cra_ablkcipher.max_keysize;

        return 0;
}

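/*
 * The key the user passed in is not aligned to the cipher's alignmask,
 * so bounce it through a correctly aligned temporary buffer, which is
 * zeroed again before it is freed.
 */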
static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
                                     const u8 *key, unsigned int keylen)
{
        unsigned long alignmask = crypto_skcipher_alignmask(tfm);
        struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
        u8 *buffer, *alignbuffer;
        unsigned long absize;
        int ret;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        kzfree(buffer);
        return ret;
}

static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                           unsigned int keylen)
{
        struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
        unsigned long alignmask = crypto_skcipher_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return skcipher_setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

        alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

        if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
                return crypto_init_skcipher_ops_blkcipher(tfm);

        if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
            tfm->__crt_alg->cra_type == &crypto_givcipher_type)
                return crypto_init_skcipher_ops_ablkcipher(tfm);

        skcipher->setkey = skcipher_setkey;
        skcipher->encrypt = alg->encrypt;
        skcipher->decrypt = alg->decrypt;
        skcipher->ivsize = alg->ivsize;
        skcipher->keysize = alg->max_keysize;

        if (alg->exit)
                skcipher->base.exit = crypto_skcipher_exit_tfm;

        if (alg->init)
                return alg->init(skcipher);

        return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
        struct skcipher_instance *skcipher =
                container_of(inst, struct skcipher_instance, s.base);

        skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
                                                     base);

        seq_printf(m, "type         : skcipher\n");
        seq_printf(m, "async        : %s\n",
                   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
        seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
        seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;
        struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
                                                     base);

        strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = skcipher->min_keysize;
        rblkcipher.max_keysize = skcipher->max_keysize;
        rblkcipher.ivsize = skcipher->ivsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                    sizeof(struct crypto_report_blkcipher), &rblkcipher))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type2 = {
        .extsize = crypto_skcipher_extsize,
        .init_tfm = crypto_skcipher_init_tfm,
        .free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
        .show = crypto_skcipher_show,
#endif
        .report = crypto_skcipher_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
                         const char *name, u32 type, u32 mask)
{
        spawn->base.frontend = &crypto_skcipher_type2;
        return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

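/*
 * Allocate a skcipher handle by algorithm name, e.g.
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 * The handle must eventually be released with crypto_free_skcipher().
 */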
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
                                              u32 type, u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
        return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
                                   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

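/*
 * Validate and fill in algorithm defaults before registration: the
 * IV, chunk and walk sizes are capped at PAGE_SIZE / 8, chunksize
 * defaults to the block size and walksize to the chunksize.
 */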
static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
        struct crypto_alg *base = &alg->base;

        if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
            alg->walksize > PAGE_SIZE / 8)
                return -EINVAL;

        if (!alg->chunksize)
                alg->chunksize = base->cra_blocksize;
        if (!alg->walksize)
                alg->walksize = alg->chunksize;

        base->cra_type = &crypto_skcipher_type2;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

        return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
        struct crypto_alg *base = &alg->base;
        int err;

        err = skcipher_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
        crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_register_skcipher(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (--i; i >= 0; --i)
                crypto_unregister_skcipher(&algs[i]);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
                               struct skcipher_instance *inst)
{
        int err;

        err = skcipher_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");