/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "safexcel.h"

struct safexcel_ahash_ctx {
        struct safexcel_context base;
        struct safexcel_crypto_priv *priv;

        u32 alg;
        u32 digest;

        u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)];
        u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)];
};

struct safexcel_ahash_req {
        bool last_req;
        bool finish;
        bool hmac;
        bool needs_inv;

        int nents;

        u8 state_sz;    /* expected state size, only set once */
        u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];

        u64 len;
        u64 processed;

        u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
        u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
};

struct safexcel_ahash_export_state {
        u64 len;
        u64 processed;

        u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
        u8 cache[SHA256_BLOCK_SIZE];
};

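/*
 * Build the two instruction tokens placed in the command descriptor: a
 * DIRECTION token hashing 'input_length' bytes of input, and an INSERT
 * token writing 'result_length' bytes of digest back to the output.
 * Both carry "last" status bits so the engine knows the packet ends
 * here.
 */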
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
                                u32 input_length, u32 result_length)
{
        struct safexcel_token *token =
                (struct safexcel_token *)cdesc->control_data.token;

        token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
        token[0].packet_length = input_length;
        token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
        token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

        token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
        token[1].packet_length = result_length;
        token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
                        EIP197_TOKEN_STAT_LAST_PACKET;
        token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
                                EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}

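/*
 * Fill in the control words of the first command descriptor. For
 * precomputed digests the current intermediate state (and, on the
 * final block, the processed-blocks counter) is copied into the
 * context record; for HMAC the precomputed ipad/opad digests are
 * loaded instead. The CONTEXT_CONTROL_SIZE() values are in 32-bit
 * words, which matches the copy loop below: 6 for SHA-1 (5 state
 * words plus the block counter), 9 for SHA-224/256 (8 plus the
 * counter).
 */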
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
                                     struct safexcel_ahash_req *req,
                                     struct safexcel_command_desc *cdesc,
                                     unsigned int digestsize,
                                     unsigned int blocksize)
{
        int i;

        cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
        cdesc->control_data.control0 |= ctx->alg;
        cdesc->control_data.control0 |= ctx->digest;

        if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
                if (req->processed) {
                        if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
                                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
                        else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
                                 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
                                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);

                        cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
                } else {
                        cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
                }

                if (!req->finish)
                        cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;

                /*
                 * Copy the input digest if needed, and setup the context
                 * fields. Do this now as we need it to setup the first command
                 * descriptor.
                 */
                if (req->processed) {
                        for (i = 0; i < digestsize / sizeof(u32); i++)
                                ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);

                        if (req->finish)
                                ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
                }
        } else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10);

                memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize);
                memcpy(ctx->base.ctxr->data + digestsize / sizeof(u32),
                       ctx->opad, digestsize);
        }
}

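/*
 * Ring completion handler for a regular hash request: pop and check
 * the result descriptor, copy the (intermediate or final) digest back
 * into the request state, unmap the source scatterlist, and carry any
 * bytes that were held back for the next send() over from cache_next
 * to cache.
 */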
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
{
        struct safexcel_result_desc *rdesc;
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
        int cache_len, result_sz = sreq->state_sz;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
        if (IS_ERR(rdesc)) {
                dev_err(priv->dev,
                        "hash: result: could not retrieve the result descriptor\n");
                *ret = PTR_ERR(rdesc);
        } else if (rdesc->result_data.error_code) {
                dev_err(priv->dev,
                        "hash: result: result descriptor error (%d)\n",
                        rdesc->result_data.error_code);
                *ret = -EINVAL;
        }

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (sreq->finish)
                result_sz = crypto_ahash_digestsize(ahash);
        memcpy(sreq->state, areq->result, result_sz);

        if (sreq->nents) {
                dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
                sreq->nents = 0;
        }

        safexcel_free_context(priv, async, sreq->state_sz);

        cache_len = sreq->len - sreq->processed;
        if (cache_len)
                memcpy(sreq->cache, sreq->cache_next, cache_len);

        *should_complete = true;

        return 1;
}

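/*
 * Turn one ahash request into command/result descriptors on the given
 * ring. Data that does not fill a whole block (and, for intermediate
 * updates, the last full block) is held back in cache_next so that
 * only block multiples are pushed to the engine; previously cached
 * bytes are prepended through a separately DMA-mapped buffer.
 */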
static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
                                   struct safexcel_request *request,
                                   int *commands, int *results)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
        struct safexcel_result_desc *rdesc;
        struct scatterlist *sg;
        int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;

        queued = len = req->len - req->processed;
        if (queued <= crypto_ahash_blocksize(ahash))
                cache_len = queued;
        else
                cache_len = queued - areq->nbytes;

        if (!req->last_req) {
                /* If this is not the last request and the queued data does not
                 * fit into full blocks, cache it for the next send() call.
                 */
                extra = queued & (crypto_ahash_blocksize(ahash) - 1);
                if (!extra)
                        /* If this is not the last request and the queued data
                         * is a multiple of a block, cache the last one for now.
                         */
                        extra = crypto_ahash_blocksize(ahash);

                if (extra) {
                        sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
                                           req->cache_next, extra,
                                           areq->nbytes - extra);

                        queued -= extra;
                        len -= extra;

                        if (!queued) {
                                *commands = 0;
                                *results = 0;
                                return 0;
                        }
                }
        }

        spin_lock_bh(&priv->ring[ring].egress_lock);

        /* Add a command descriptor for the cached data, if any */
        if (cache_len) {
                ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async));
                if (!ctx->base.cache) {
                        ret = -ENOMEM;
                        goto unlock;
                }
                memcpy(ctx->base.cache, req->cache, cache_len);
                ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
                                                     cache_len, DMA_TO_DEVICE);
                if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {
                        ret = -EINVAL;
                        goto free_cache;
                }

                ctx->base.cache_sz = cache_len;
                first_cdesc = safexcel_add_cdesc(priv, ring, 1,
                                                 (cache_len == len),
                                                 ctx->base.cache_dma,
                                                 cache_len, len,
                                                 ctx->base.ctxr_dma);
                if (IS_ERR(first_cdesc)) {
                        ret = PTR_ERR(first_cdesc);
                        goto unmap_cache;
                }
                n_cdesc++;

                queued -= cache_len;
                if (!queued)
                        goto send_command;
        }

        /* Now handle the current ahash request buffer(s) */
        req->nents = dma_map_sg(priv->dev, areq->src,
                                sg_nents_for_len(areq->src, areq->nbytes),
                                DMA_TO_DEVICE);
        if (!req->nents) {
                ret = -ENOMEM;
                goto cdesc_rollback;
        }

        for_each_sg(areq->src, sg, req->nents, i) {
                int sglen = sg_dma_len(sg);

                /* Do not overflow the request */
                if (queued - sglen < 0)
                        sglen = queued;

                cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
                                           !(queued - sglen), sg_dma_address(sg),
                                           sglen, len, ctx->base.ctxr_dma);
                if (IS_ERR(cdesc)) {
                        ret = PTR_ERR(cdesc);
                        goto cdesc_rollback;
                }
                n_cdesc++;

                if (n_cdesc == 1)
                        first_cdesc = cdesc;

                queued -= sglen;
                if (!queued)
                        break;
        }

send_command:
        /* Setup the context options */
        safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
                                 crypto_ahash_blocksize(ahash));

        /* Add the token */
        safexcel_hash_token(first_cdesc, len, req->state_sz);

        ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
                                              req->state_sz, DMA_FROM_DEVICE);
        if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
                ret = -EINVAL;
                goto cdesc_rollback;
        }

        /* Add a result descriptor */
        rdesc = safexcel_add_rdesc(priv, ring, 1, 1, ctx->base.result_dma,
                                   req->state_sz);
        if (IS_ERR(rdesc)) {
                ret = PTR_ERR(rdesc);
                goto cdesc_rollback;
        }

        spin_unlock_bh(&priv->ring[ring].egress_lock);

        req->processed += len;
        request->req = &areq->base;

        *commands = n_cdesc;
        *results = 1;
        return 0;

cdesc_rollback:
        for (i = 0; i < n_cdesc; i++)
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
        if (ctx->base.cache_dma) {
                dma_unmap_single(priv->dev, ctx->base.cache_dma,
                                 ctx->base.cache_sz, DMA_TO_DEVICE);
                ctx->base.cache_sz = 0;
        }
free_cache:
        if (ctx->base.cache) {
                kfree(ctx->base.cache);
                ctx->base.cache = NULL;
        }

unlock:
        spin_unlock_bh(&priv->ring[ring].egress_lock);
        return ret;
}

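/*
 * Check whether the hash state stored in the hardware context record
 * still matches what this request expects; if not, the context must
 * be invalidated before it can be reused.
 */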
static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        unsigned int state_w_sz = req->state_sz / sizeof(u32);
        int i;

        for (i = 0; i < state_w_sz; i++)
                if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
                        return true;

        if (ctx->base.ctxr->data[state_w_sz] !=
            cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
                return true;

        return false;
}

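/*
 * Completion handler for a context invalidation request. On tfm exit
 * the context record is freed; otherwise the request that triggered
 * the invalidation is re-queued on a (possibly different) ring.
 */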
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
                                      int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
{
        struct safexcel_result_desc *rdesc;
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
        int enq_ret;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
        if (IS_ERR(rdesc)) {
                dev_err(priv->dev,
                        "hash: invalidate: could not retrieve the result descriptor\n");
                *ret = PTR_ERR(rdesc);
        } else if (rdesc->result_data.error_code) {
                dev_err(priv->dev,
                        "hash: invalidate: result descriptor error (%d)\n",
                        rdesc->result_data.error_code);
                *ret = -EINVAL;
        }

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (ctx->base.exit_inv) {
                dma_pool_free(priv->context_pool, ctx->base.ctxr,
                              ctx->base.ctxr_dma);

                *should_complete = true;
                return 1;
        }

        ring = safexcel_select_ring(priv);
        ctx->base.ring = ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (enq_ret != -EINPROGRESS)
                *ret = enq_ret;

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        *should_complete = false;

        return 1;
}

static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
                                  struct crypto_async_request *async,
                                  bool *should_complete, int *ret)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        int err;

        if (req->needs_inv) {
                req->needs_inv = false;
                err = safexcel_handle_inv_result(priv, ring, async,
                                                 should_complete, ret);
        } else {
                err = safexcel_handle_req_result(priv, ring, async,
                                                 should_complete, ret);
        }

        return err;
}

static int safexcel_ahash_send_inv(struct crypto_async_request *async,
                                   int ring, struct safexcel_request *request,
                                   int *commands, int *results)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        int ret;

        ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
                                        ctx->base.ctxr_dma, ring, request);
        if (unlikely(ret))
                return ret;

        *commands = 1;
        *results = 1;

        return 0;
}

static int safexcel_ahash_send(struct crypto_async_request *async,
                               int ring, struct safexcel_request *request,
                               int *commands, int *results)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        int ret;

        if (req->needs_inv)
                ret = safexcel_ahash_send_inv(async, ring, request,
                                              commands, results);
        else
                ret = safexcel_ahash_send_req(async, ring, request,
                                              commands, results);
        return ret;
}

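/*
 * Synchronously invalidate the hardware context record when the tfm
 * is torn down: build an invalidation request on the stack, queue it
 * and wait for its completion callback.
 */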
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
        struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
        struct safexcel_inv_result result = {};
        int ring = ctx->base.ring;

        memset(req, 0, sizeof(struct ahash_request));

        /* create invalidation request */
        init_completion(&result.completion);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   safexcel_inv_complete, &result);

        ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
        ctx = crypto_tfm_ctx(req->base.tfm);
        ctx->base.exit_inv = true;
        rctx->needs_inv = true;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        wait_for_completion(&result.completion);

        if (result.error) {
                dev_warn(priv->dev, "hash: completion error (%d)\n",
                         result.error);
                return result.error;
        }

        return 0;
}

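/*
 * Stash the new input bytes into the request cache when, together
 * with what is already cached, they do not exceed one block. Returns
 * the number of bytes cached, or -E2BIG when there is more than a
 * block's worth and the data must be sent to the engine instead.
 */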
static int safexcel_ahash_cache(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        int queued, cache_len;

        cache_len = req->len - areq->nbytes - req->processed;
        queued = req->len - req->processed;

        /*
         * In case there aren't enough bytes to proceed (less than a
         * block size), cache the data until we have enough.
         */
        if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
                sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
                                   req->cache + cache_len,
                                   areq->nbytes, 0);
                return areq->nbytes;
        }

        /* We couldn't cache all the data */
        return -E2BIG;
}

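/*
 * Queue the request on its ring, allocating a context record on first
 * use and flagging an invalidation first when the cached hardware
 * state no longer matches the request.
 */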
static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret, ring;

        req->needs_inv = false;

        if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
                ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);

        if (ctx->base.ctxr) {
                if (ctx->base.needs_inv) {
                        ctx->base.needs_inv = false;
                        req->needs_inv = true;
                }
        } else {
                ctx->base.ring = safexcel_select_ring(priv);
                ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
                                                 EIP197_GFP_FLAGS(areq->base),
                                                 &ctx->base.ctxr_dma);
                if (!ctx->base.ctxr)
                        return -ENOMEM;
        }

        ring = ctx->base.ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (!priv->ring[ring].need_dequeue)
                safexcel_dequeue(priv, ring);

        return ret;
}

static int safexcel_ahash_update(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

        /* If the request is 0 length, do nothing */
        if (!areq->nbytes)
                return 0;

        req->len += areq->nbytes;

        safexcel_ahash_cache(areq);

        /*
         * We're not doing partial updates when performing an HMAC request.
         * Everything will be handled by the final() call.
         */
        if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC)
                return 0;

        if (req->hmac)
                return safexcel_ahash_enqueue(areq);

        if (!req->last_req &&
            req->len - req->processed > crypto_ahash_blocksize(ahash))
                return safexcel_ahash_enqueue(areq);

        return 0;
}

static int safexcel_ahash_final(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

        req->last_req = true;
        req->finish = true;

        /* If we have an overall 0 length request */
        if (!(req->len + areq->nbytes)) {
                if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
                        memcpy(areq->result, sha1_zero_message_hash,
                               SHA1_DIGEST_SIZE);
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
                        memcpy(areq->result, sha224_zero_message_hash,
                               SHA224_DIGEST_SIZE);
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
                        memcpy(areq->result, sha256_zero_message_hash,
                               SHA256_DIGEST_SIZE);

                return 0;
        }

        return safexcel_ahash_enqueue(areq);
}

static int safexcel_ahash_finup(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        req->last_req = true;
        req->finish = true;

        safexcel_ahash_update(areq);
        return safexcel_ahash_final(areq);
}

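/*
 * export/import serialize the software-visible part of an ongoing
 * hash (lengths, intermediate state and cached partial block) so a
 * request can be suspended and later resumed on another tfm of the
 * same algorithm.
 */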
static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_export_state *export = out;

        export->len = req->len;
        export->processed = req->processed;

        memcpy(export->state, req->state, req->state_sz);
        memset(export->cache, 0, crypto_ahash_blocksize(ahash));
        memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));

        return 0;
}

static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        const struct safexcel_ahash_export_state *export = in;
        int ret;

        ret = crypto_ahash_init(areq);
        if (ret)
                return ret;

        req->len = export->len;
        req->processed = export->processed;

        memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
        memcpy(req->state, export->state, req->state_sz);

        return 0;
}

static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_alg_template *tmpl =
                container_of(__crypto_ahash_alg(tfm->__crt_alg),
                             struct safexcel_alg_template, alg.ahash);

        ctx->priv = tmpl->priv;
        ctx->base.send = safexcel_ahash_send;
        ctx->base.handle_result = safexcel_handle_result;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct safexcel_ahash_req));
        return 0;
}

static int safexcel_sha1_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA1_H0;
        req->state[1] = SHA1_H1;
        req->state[2] = SHA1_H2;
        req->state[3] = SHA1_H3;
        req->state[4] = SHA1_H4;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA1_DIGEST_SIZE;

        return 0;
}

static int safexcel_sha1_digest(struct ahash_request *areq)
{
        int ret = safexcel_sha1_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        /* context not allocated, skip invalidation */
        if (!ctx->base.ctxr)
                return;

        ret = safexcel_ahash_exit_inv(tfm);
        if (ret)
                dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
}

struct safexcel_alg_template safexcel_alg_sha1 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_sha1_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha1_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "sha1",
                                .cra_driver_name = "safexcel-sha1",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};

static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

        safexcel_sha1_init(areq);
        ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC;
        return 0;
}

static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
        int ret = safexcel_hmac_sha1_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

struct safexcel_ahash_result {
        struct completion completion;
        int error;
};

static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
{
        struct safexcel_ahash_result *result = req->data;

        if (error == -EINPROGRESS)
                return;

        result->error = error;
        complete(&result->completion);
}

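/*
 * Prepare the HMAC inner and outer pads as per RFC 2104: a key longer
 * than the block size is first replaced by its digest, then the key
 * is zero-padded to a full block and XORed with HMAC_IPAD_VALUE
 * (0x36) and HMAC_OPAD_VALUE (0x5c) respectively. E.g. for SHA-1, a
 * 20-byte key yields a 64-byte ipad of key[i] ^ 0x36 for i < 20 and
 * plain 0x36 for the remaining 44 bytes.
 */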
static int safexcel_hmac_init_pad(struct ahash_request *areq,
                                  unsigned int blocksize, const u8 *key,
                                  unsigned int keylen, u8 *ipad, u8 *opad)
{
        struct safexcel_ahash_result result;
        struct scatterlist sg;
        int ret, i;
        u8 *keydup;

        if (keylen <= blocksize) {
                memcpy(ipad, key, keylen);
        } else {
                keydup = kmemdup(key, keylen, GFP_KERNEL);
                if (!keydup)
                        return -ENOMEM;

                ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                           safexcel_ahash_complete, &result);
                sg_init_one(&sg, keydup, keylen);
                ahash_request_set_crypt(areq, &sg, ipad, keylen);
                init_completion(&result.completion);

                ret = crypto_ahash_digest(areq);
                if (ret == -EINPROGRESS || ret == -EBUSY) {
                        wait_for_completion_interruptible(&result.completion);
                        ret = result.error;
                }

                /* Avoid leaking */
                memzero_explicit(keydup, keylen);
                kfree(keydup);

                if (ret)
                        return ret;

                keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
        }

        memset(ipad + keylen, 0, blocksize - keylen);
        memcpy(opad, ipad, blocksize);

        for (i = 0; i < blocksize; i++) {
                ipad[i] ^= HMAC_IPAD_VALUE;
                opad[i] ^= HMAC_OPAD_VALUE;
        }

        return 0;
}

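/*
 * Run a single block (the ipad or opad) through the hash and export
 * the resulting intermediate state. This yields the precomputed
 * digests loaded into the context record for HMAC, so the pads never
 * have to be hashed again per request.
 */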
static int safexcel_hmac_init_iv(struct ahash_request *areq,
                                 unsigned int blocksize, u8 *pad, void *state)
{
        struct safexcel_ahash_result result;
        struct safexcel_ahash_req *req;
        struct scatterlist sg;
        int ret;

        ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   safexcel_ahash_complete, &result);
        sg_init_one(&sg, pad, blocksize);
        ahash_request_set_crypt(areq, &sg, pad, blocksize);
        init_completion(&result.completion);

        ret = crypto_ahash_init(areq);
        if (ret)
                return ret;

        req = ahash_request_ctx(areq);
        req->hmac = true;
        req->last_req = true;

        ret = crypto_ahash_update(areq);
        if (ret && ret != -EINPROGRESS)
                return ret;

        wait_for_completion_interruptible(&result.completion);
        if (result.error)
                return result.error;

        return crypto_ahash_export(areq, state);
}

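/*
 * Compute the inner and outer intermediate hash states for an HMAC
 * key by driving an ahash of the given 'alg' over the two pads. The
 * states are returned through istate/ostate as export blobs of that
 * ahash.
 */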
static int safexcel_hmac_setkey(const char *alg, const u8 *key,
                                unsigned int keylen, void *istate, void *ostate)
{
        struct ahash_request *areq;
        struct crypto_ahash *tfm;
        unsigned int blocksize;
        u8 *ipad, *opad;
        int ret;

        tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
                                 CRYPTO_ALG_TYPE_AHASH_MASK);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        areq = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!areq) {
                ret = -ENOMEM;
                goto free_ahash;
        }

        crypto_ahash_clear_flags(tfm, ~0);
        blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        ipad = kzalloc(2 * blocksize, GFP_KERNEL);
        if (!ipad) {
                ret = -ENOMEM;
                goto free_request;
        }

        opad = ipad + blocksize;

        ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
        if (ret)
                goto free_ipad;

        ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
        if (ret)
                goto free_ipad;

        ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

free_ipad:
        kfree(ipad);
free_request:
        ahash_request_free(areq);
free_ahash:
        crypto_free_ahash(tfm);

        return ret;
}

static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
                                     unsigned int keylen)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct safexcel_ahash_export_state istate, ostate;
        int ret, i;

        ret = safexcel_hmac_setkey("safexcel-sha1", key, keylen, &istate, &ostate);
        if (ret)
                return ret;

        for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
                if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
                    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
                        ctx->base.needs_inv = true;
                        break;
                }
        }

        memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
        memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);

        return 0;
}

struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_hmac_sha1_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_hmac_sha1_digest,
                .setkey = safexcel_hmac_sha1_setkey,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "hmac(sha1)",
                                .cra_driver_name = "safexcel-hmac-sha1",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};

static int safexcel_sha256_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA256_H0;
        req->state[1] = SHA256_H1;
        req->state[2] = SHA256_H2;
        req->state[3] = SHA256_H3;
        req->state[4] = SHA256_H4;
        req->state[5] = SHA256_H5;
        req->state[6] = SHA256_H6;
        req->state[7] = SHA256_H7;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA256_DIGEST_SIZE;

        return 0;
}

static int safexcel_sha256_digest(struct ahash_request *areq)
{
        int ret = safexcel_sha256_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha256 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_sha256_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha256_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA256_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "sha256",
                                .cra_driver_name = "safexcel-sha256",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};

static int safexcel_sha224_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA224_H0;
        req->state[1] = SHA224_H1;
        req->state[2] = SHA224_H2;
        req->state[3] = SHA224_H3;
        req->state[4] = SHA224_H4;
        req->state[5] = SHA224_H5;
        req->state[6] = SHA224_H6;
        req->state[7] = SHA224_H7;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA256_DIGEST_SIZE;

        return 0;
}

static int safexcel_sha224_digest(struct ahash_request *areq)
{
        int ret = safexcel_sha224_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha224 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_sha224_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha224_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA224_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "sha224",
                                .cra_driver_name = "safexcel-sha224",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};