GNU Linux-libre 4.19.264-gnu1
arch/x86/crypto/ghash-clmulni-intel_glue.c
/*
 * Accelerated GHASH implementation with Intel PCLMULQDQ-NI
 * instructions. This file contains glue code.
 *
 * Copyright (c) 2009 Intel Corp.
 *   Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/cryptd.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/hash.h>
#include <asm/fpu/api.h>
#include <asm/cpu_device_id.h>

#define GHASH_BLOCK_SIZE        16
#define GHASH_DIGEST_SIZE       16

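/*
 * These routines are implemented in assembly
 * (arch/x86/crypto/ghash-clmulni-intel_asm.S) and use PCLMULQDQ and SSE
 * registers, so callers must wrap them in kernel_fpu_begin()/
 * kernel_fpu_end().
 */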
void clmul_ghash_mul(char *dst, const u128 *shash);

void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
                        const u128 *shash);

struct ghash_async_ctx {
        struct cryptd_ahash *cryptd_tfm;
};

struct ghash_ctx {
        u128 shash;
};

struct ghash_desc_ctx {
        u8 buffer[GHASH_BLOCK_SIZE];
        u32 bytes;
};

static int ghash_init(struct shash_desc *desc)
{
        struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);

        memset(dctx, 0, sizeof(*dctx));

        return 0;
}

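/*
 * Precompute the hash key for the assembly routines.  GHASH interprets
 * the key block H as an element of GF(2^128) in a bit-reflected
 * representation; storing H * x (H shifted left by one bit, reduced
 * modulo x^128 + x^7 + x^2 + x + 1 when the top bit was set) is the
 * standard trick that lets a PCLMULQDQ implementation operate on the
 * reflected operands directly.  0xc2 << 56 is the reduction constant in
 * this representation.
 */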
static int ghash_setkey(struct crypto_shash *tfm,
                        const u8 *key, unsigned int keylen)
{
        struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
        be128 *x = (be128 *)key;
        u64 a, b;

        if (keylen != GHASH_BLOCK_SIZE) {
                crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        /* perform multiplication by 'x' in GF(2^128) */
        a = be64_to_cpu(x->a);
        b = be64_to_cpu(x->b);

        ctx->shash.a = (b << 1) | (a >> 63);
        ctx->shash.b = (a << 1) | (b >> 63);

        if (a >> 63)
                ctx->shash.b ^= ((u64)0xc2) << 56;

        return 0;
}

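/*
 * Hash srclen bytes of src.  Any partial block buffered by a previous
 * call is topped up and folded first, the assembly then consumes all
 * whole blocks, and a remaining tail is XORed into dctx->buffer (the
 * running state) to wait for more data; dctx->bytes counts how many
 * bytes are still needed to complete that block.
 */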
static int ghash_update(struct shash_desc *desc,
                         const u8 *src, unsigned int srclen)
{
        struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
        struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
        u8 *dst = dctx->buffer;

        kernel_fpu_begin();
        if (dctx->bytes) {
                int n = min(srclen, dctx->bytes);
                u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);

                dctx->bytes -= n;
                srclen -= n;

                while (n--)
                        *pos++ ^= *src++;

                if (!dctx->bytes)
                        clmul_ghash_mul(dst, &ctx->shash);
        }

        clmul_ghash_update(dst, src, srclen, &ctx->shash);
        kernel_fpu_end();

        if (srclen & 0xf) {
                src += srclen - (srclen & 0xf);
                srclen &= 0xf;
                dctx->bytes = GHASH_BLOCK_SIZE - srclen;
                while (srclen--)
                        *dst++ ^= *src++;
        }

        return 0;
}

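/*
 * Fold a buffered partial block into the digest.  GHASH zero-pads the
 * final partial block, and XORing zeroes into the buffered state is an
 * identity operation, so the loop below is effectively a no-op that
 * spells the padding out.
 */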
static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
{
        u8 *dst = dctx->buffer;

        if (dctx->bytes) {
                u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);

                while (dctx->bytes--)
                        *tmp++ ^= 0;

                kernel_fpu_begin();
                clmul_ghash_mul(dst, &ctx->shash);
                kernel_fpu_end();
        }

        dctx->bytes = 0;
}

static int ghash_final(struct shash_desc *desc, u8 *dst)
{
        struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
        struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
        u8 *buf = dctx->buffer;

        ghash_flush(ctx, dctx);
        memcpy(dst, buf, GHASH_BLOCK_SIZE);

        return 0;
}

static struct shash_alg ghash_alg = {
        .digestsize     = GHASH_DIGEST_SIZE,
        .init           = ghash_init,
        .update         = ghash_update,
        .final          = ghash_final,
        .setkey         = ghash_setkey,
        .descsize       = sizeof(struct ghash_desc_ctx),
        .base           = {
                .cra_name               = "__ghash",
                .cra_driver_name        = "__ghash-pclmulqdqni",
                .cra_priority           = 0,
                .cra_flags              = CRYPTO_ALG_INTERNAL,
                .cra_blocksize          = GHASH_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct ghash_ctx),
                .cra_module             = THIS_MODULE,
        },
};

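/*
 * The async wrappers below hand a request off to cryptd whenever the
 * FPU cannot be used in the current context (or cryptd already has work
 * queued, to preserve ordering); otherwise they run the internal shash
 * synchronously.
 */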
static int ghash_async_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
        struct ahash_request *cryptd_req = ahash_request_ctx(req);
        struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
        struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
        struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);

        desc->tfm = child;
        desc->flags = req->base.flags;
        return crypto_shash_init(desc);
}

static int ghash_async_update(struct ahash_request *req)
{
        struct ahash_request *cryptd_req = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
        struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;

        if (!irq_fpu_usable() ||
            (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
                memcpy(cryptd_req, req, sizeof(*req));
                ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
                return crypto_ahash_update(cryptd_req);
        } else {
                struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
                return shash_ahash_update(req, desc);
        }
}

static int ghash_async_final(struct ahash_request *req)
{
        struct ahash_request *cryptd_req = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
        struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;

        if (!irq_fpu_usable() ||
            (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
                memcpy(cryptd_req, req, sizeof(*req));
                ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
                return crypto_ahash_final(cryptd_req);
        } else {
                struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
                return crypto_shash_final(desc, req->result);
        }
}

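/*
 * export/import serialize the ghash_desc_ctx so a partially hashed
 * request can be suspended and resumed; .statesize in the halg below
 * matches sizeof(struct ghash_desc_ctx) accordingly.
 */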
static int ghash_async_import(struct ahash_request *req, const void *in)
{
        struct ahash_request *cryptd_req = ahash_request_ctx(req);
        struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
        struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);

        ghash_async_init(req);
        memcpy(dctx, in, sizeof(*dctx));
        return 0;
}

static int ghash_async_export(struct ahash_request *req, void *out)
{
        struct ahash_request *cryptd_req = ahash_request_ctx(req);
        struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
        struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);

        memcpy(out, dctx, sizeof(*dctx));
        return 0;
}

static int ghash_async_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
        struct ahash_request *cryptd_req = ahash_request_ctx(req);
        struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;

        if (!irq_fpu_usable() ||
            (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
                memcpy(cryptd_req, req, sizeof(*req));
                ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
                return crypto_ahash_digest(cryptd_req);
        } else {
                struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
                struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);

                desc->tfm = child;
                desc->flags = req->base.flags;
                return shash_ahash_digest(req, desc);
        }
}

static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
                              unsigned int keylen)
{
        struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
        struct crypto_ahash *child = &ctx->cryptd_tfm->base;
        int err;

        crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm)
                               & CRYPTO_TFM_REQ_MASK);
        err = crypto_ahash_setkey(child, key, keylen);
        crypto_ahash_set_flags(tfm, crypto_ahash_get_flags(child)
                               & CRYPTO_TFM_RES_MASK);

        return err;
}

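/*
 * Bind each async tfm to its own cryptd instance of the internal
 * "__ghash-pclmulqdqni" shash so that deferred requests are processed
 * in process context, where the FPU is available.
 */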
static int ghash_async_init_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_ahash *cryptd_tfm;
        struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);

        cryptd_tfm = cryptd_alloc_ahash("__ghash-pclmulqdqni",
                                        CRYPTO_ALG_INTERNAL,
                                        CRYPTO_ALG_INTERNAL);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);
        ctx->cryptd_tfm = cryptd_tfm;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct ahash_request) +
                                 crypto_ahash_reqsize(&cryptd_tfm->base));

        return 0;
}

static void ghash_async_exit_tfm(struct crypto_tfm *tfm)
{
        struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);

        cryptd_free_ahash(ctx->cryptd_tfm);
}

static struct ahash_alg ghash_async_alg = {
        .init           = ghash_async_init,
        .update         = ghash_async_update,
        .final          = ghash_async_final,
        .setkey         = ghash_async_setkey,
        .digest         = ghash_async_digest,
        .export         = ghash_async_export,
        .import         = ghash_async_import,
        .halg = {
                .digestsize     = GHASH_DIGEST_SIZE,
                .statesize      = sizeof(struct ghash_desc_ctx),
                .base = {
                        .cra_name               = "ghash",
                        .cra_driver_name        = "ghash-clmulni",
                        .cra_priority           = 400,
                        .cra_ctxsize            = sizeof(struct ghash_async_ctx),
                        .cra_flags              = CRYPTO_ALG_ASYNC,
                        .cra_blocksize          = GHASH_BLOCK_SIZE,
                        .cra_module             = THIS_MODULE,
                        .cra_init               = ghash_async_init_tfm,
                        .cra_exit               = ghash_async_exit_tfm,
                },
        },
};

static const struct x86_cpu_id pcmul_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ), /* Pickle-Mickle-Duck */
        {}
};
MODULE_DEVICE_TABLE(x86cpu, pcmul_cpu_id);

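/*
 * The device table lets the module loader autoload this driver on CPUs
 * that advertise the PCLMULQDQ feature bit; init re-checks the feature
 * via x86_match_cpu() below.
 */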
static int __init ghash_pclmulqdqni_mod_init(void)
{
        int err;

        if (!x86_match_cpu(pcmul_cpu_id))
                return -ENODEV;

        err = crypto_register_shash(&ghash_alg);
        if (err)
                goto err_out;
        err = crypto_register_ahash(&ghash_async_alg);
        if (err)
                goto err_shash;

        return 0;

err_shash:
        crypto_unregister_shash(&ghash_alg);
err_out:
        return err;
}

static void __exit ghash_pclmulqdqni_mod_exit(void)
{
        crypto_unregister_ahash(&ghash_async_alg);
        crypto_unregister_shash(&ghash_alg);
}

module_init(ghash_pclmulqdqni_mod_init);
module_exit(ghash_pclmulqdqni_mod_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GHASH Message Digest Algorithm, "
                   "accelerated by PCLMULQDQ-NI");
MODULE_ALIAS_CRYPTO("ghash");
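/*
 * Usage sketch (illustrative only, not part of this module): kernel code
 * reaches this driver through the generic ahash API.  A minimal caller,
 * assuming a 16-byte key in "key" and "len" bytes of linear data in
 * "data", might look like:
 *
 *      struct crypto_ahash *tfm = crypto_alloc_ahash("ghash", 0, 0);
 *      struct ahash_request *req;
 *      struct scatterlist sg;
 *      u8 digest[GHASH_DIGEST_SIZE];
 *
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 *      req = ahash_request_alloc(tfm, GFP_KERNEL);
 *      crypto_ahash_setkey(tfm, key, GHASH_BLOCK_SIZE);
 *      sg_init_one(&sg, data, len);
 *      ahash_request_set_callback(req, 0, NULL, NULL);
 *      ahash_request_set_crypt(req, &sg, digest, len);
 *      crypto_ahash_digest(req);
 *      ahash_request_free(req);
 *      crypto_free_ahash(tfm);
 *
 * Because this algorithm is registered with CRYPTO_ALG_ASYNC, a real
 * caller would pass a completion callback (or use crypto_wait_req())
 * and handle -EINPROGRESS from crypto_ahash_digest() before reading
 * the digest.
 */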