/* GNU Linux-libre 4.19.286-gnu1 - drivers/crypto/caam/caamhash.c */
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
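
/*
 * Illustrative sketch (not part of this driver): requests reach this code
 * through the generic kernel ahash API. Assuming the CAAM-backed "sha256"
 * implementation wins algorithm selection, a one-shot digest looks
 * roughly like:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, my_ctx);
 *	ahash_request_set_crypt(req, &sg, digest_buf, len);
 *	err = crypto_ahash_digest(req);    (-EINPROGRESS when queued)
 *
 * done_cb, my_ctx, data, len and digest_buf are caller-supplied names;
 * such a request is eventually serviced by ahash_digest() below.
 */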

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY               3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE          (SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE        SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE       SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_AHASH_BASE                 (3 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN           (6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN     (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN            (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN            (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN           (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES        (DESC_AHASH_FINAL_LEN + \
                                         CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN          (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
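
/*
 * Worked example: CAAM_CMD_SZ is one 32-bit descriptor word (4 bytes), so
 * DESC_AHASH_FINAL_LEN is (3 + 5) * 4 = 32 bytes of commands, and
 * DESC_HASH_MAX_USED_LEN reserves those 32 bytes plus up to
 * CAAM_MAX_HASH_KEY_SIZE (128) bytes of inlined split key, i.e. 40
 * 32-bit words per shared descriptor buffer.
 */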

/*
 * caam context size for hashes: the running digest plus 8 bytes of
 * running message length that the hardware keeps alongside it
 */
#define HASH_MSG_LEN                    8
#define MAX_CTX_LEN                     (HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif


static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
        u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        dma_addr_t sh_desc_update_dma ____cacheline_aligned;
        dma_addr_t sh_desc_update_first_dma;
        dma_addr_t sh_desc_fin_dma;
        dma_addr_t sh_desc_digest_dma;
        enum dma_data_direction dir;
        struct device *jrdev;
        u8 key[CAAM_MAX_HASH_KEY_SIZE];
        int ctx_len;
        struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
        dma_addr_t buf_dma;
        dma_addr_t ctx_dma;
        int ctx_dma_len;
        u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
        int buflen_0;
        u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
        int buflen_1;
        u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
        int (*update)(struct ahash_request *req);
        int (*final)(struct ahash_request *req);
        int (*finup)(struct ahash_request *req);
        int current_buf;
};

struct caam_export_state {
        u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
        u8 caam_ctx[MAX_CTX_LEN];
        int buflen;
        int (*update)(struct ahash_request *req);
        int (*final)(struct ahash_request *req);
        int (*finup)(struct ahash_request *req);
};

static inline void switch_buf(struct caam_hash_state *state)
{
        state->current_buf ^= 1;
}

static inline u8 *current_buf(struct caam_hash_state *state)
{
        return state->current_buf ? state->buf_1 : state->buf_0;
}

static inline u8 *alt_buf(struct caam_hash_state *state)
{
        return state->current_buf ? state->buf_0 : state->buf_1;
}

static inline int *current_buflen(struct caam_hash_state *state)
{
        return state->current_buf ? &state->buflen_1 : &state->buflen_0;
}

static inline int *alt_buflen(struct caam_hash_state *state)
{
        return state->current_buf ? &state->buflen_0 : &state->buflen_1;
}
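
/*
 * The state above keeps two bounce buffers and ping-pongs between them:
 * the "current" buffer holds the sub-blocksize tail that belongs to the
 * in-flight job, while leftover bytes of the new request are staged in
 * the "alternate" buffer. switch_buf() flips the roles once the job has
 * been submitted or completes (see ahash_done_bi()/ahash_done_ctx_dst()).
 */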

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
                                      struct caam_hash_state *state,
                                      int ctx_len)
{
        state->ctx_dma_len = ctx_len;
        state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
                                        ctx_len, DMA_FROM_DEVICE);
        if (dma_mapping_error(jrdev, state->ctx_dma)) {
                dev_err(jrdev, "unable to map ctx\n");
                state->ctx_dma = 0;
                return -ENOMEM;
        }

        append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

        return 0;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
                                     struct sec4_sg_entry *sec4_sg,
                                     struct caam_hash_state *state)
{
        int buflen = *current_buflen(state);

        if (!buflen)
                return 0;

        state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
                                        DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, state->buf_dma)) {
                dev_err(jrdev, "unable to map buf\n");
                state->buf_dma = 0;
                return -ENOMEM;
        }

        dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

        return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
                                     struct caam_hash_state *state, int ctx_len,
                                     struct sec4_sg_entry *sec4_sg, u32 flag)
{
        state->ctx_dma_len = ctx_len;
        state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
        if (dma_mapping_error(jrdev, state->ctx_dma)) {
                dev_err(jrdev, "unable to map ctx\n");
                state->ctx_dma = 0;
                return -ENOMEM;
        }

        dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

        return 0;
}

/*
 * For ahash update, final and finup (import_ctx = true)
 *     import the saved context, read input from seqin and write the
 *     result to seqout
 * For ahash first update and digest (import_ctx = false)
 *     read input from seqin and write the result to seqout
 */
static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
                                     struct caam_hash_ctx *ctx, bool import_ctx,
                                     int era)
{
        u32 op = ctx->adata.algtype;
        u32 *skip_key_load;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Append key if it has been set; ahash update excluded */
        if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
                /* Skip key loading if already shared */
                skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
                                            JUMP_COND_SHRD);

                if (era < 6)
                        append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
                                          ctx->adata.keylen, CLASS_2 |
                                          KEY_DEST_MDHA_SPLIT | KEY_ENC);
                else
                        append_proto_dkp(desc, &ctx->adata);

                set_jump_tgt_here(desc, skip_key_load);

                op |= OP_ALG_AAI_HMAC_PRECOMP;
        }

        /* If needed, import context from software */
        if (import_ctx)
                append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
                                LDST_SRCDST_BYTE_CONTEXT);

        /* Class 2 operation */
        append_operation(desc, op | state | OP_ALG_ENCRYPT);

        /*
         * Load from buf and/or src and write to req->result or state->context
         * Calculate remaining bytes to read
         */
        append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
        /* Read remaining bytes */
        append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
                             FIFOLD_TYPE_MSG | KEY_VLF);
        /* Store class2 context bytes */
        append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);
}
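
/*
 * For reference, the shared descriptor built above executes, in order:
 * header, a conditional (split) key load that is skipped when the
 * descriptor is already shared, an optional SEQ LOAD of the saved running
 * context, the class 2 OPERATION, a MATH command setting the variable
 * sequence input length to everything remaining in seqin, a
 * variable-length SEQ FIFO LOAD of the message, and a SEQ STORE of the
 * updated context or final digest.
 */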

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct device *jrdev = ctx->jrdev;
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
        u32 *desc;

        ctx->adata.key_virt = ctx->key;

        /* ahash_update shared descriptor */
        desc = ctx->sh_desc_update;
        ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
                          ctrlpriv->era);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
                                   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "ahash update shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

        /* ahash_update_first shared descriptor */
        desc = ctx->sh_desc_update_first;
        ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
                          ctrlpriv->era);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
                                   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "ahash update first shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

        /* ahash_final shared descriptor */
        desc = ctx->sh_desc_fin;
        ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
                          ctrlpriv->era);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
                                   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        /* ahash_digest shared descriptor */
        desc = ctx->sh_desc_digest;
        ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
                          ctrlpriv->era);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
                                   desc_bytes(desc), ctx->dir);
#ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "ahash digest shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
                       desc_bytes(desc), 1);
#endif

        return 0;
}
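
/*
 * Note: the four shared descriptor buffers live inside caam_hash_ctx and
 * are DMA mapped once for the lifetime of the tfm (at cra_init time), so
 * after rewriting them above only dma_sync_single_for_device() is needed
 * to make the new contents visible to the CAAM engine.
 */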

/* Digest the key if it is longer than the algorithm's block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
                           u32 *keylen, u8 *key_out, u32 digestsize)
{
        struct device *jrdev = ctx->jrdev;
        u32 *desc;
        struct split_key_result result;
        dma_addr_t src_dma, dst_dma;
        int ret;

        desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
        if (!desc) {
                dev_err(jrdev, "unable to allocate key input memory\n");
                return -ENOMEM;
        }

        init_job_desc(desc, 0);

        src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
                                 DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, src_dma)) {
                dev_err(jrdev, "unable to map key input memory\n");
                kfree(desc);
                return -ENOMEM;
        }
        dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
                                 DMA_FROM_DEVICE);
        if (dma_mapping_error(jrdev, dst_dma)) {
                dev_err(jrdev, "unable to map key output memory\n");
                dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
                kfree(desc);
                return -ENOMEM;
        }

        /* Job descriptor to perform unkeyed hash on key_in */
        append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
                         OP_ALG_AS_INITFINAL);
        append_seq_in_ptr(desc, src_dma, *keylen, 0);
        append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
                             FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
        append_seq_out_ptr(desc, dst_dma, digestsize, 0);
        append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

        result.err = 0;
        init_completion(&result.completion);

        ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
        if (!ret) {
                /* in progress */
                wait_for_completion(&result.completion);
                ret = result.err;
        }
        dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
        dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

#ifdef DEBUG
        /* dump the digested key only after its DMA buffer is unmapped */
        if (!ret)
                print_hex_dump(KERN_ERR,
                               "digested key@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, key_out,
                               digestsize, 1);
#endif

        *keylen = digestsize;

        kfree(desc);

        return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
                        const u8 *key, unsigned int keylen)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
        int ret;
        u8 *hashed_key = NULL;

#ifdef DEBUG
        printk(KERN_ERR "keylen %d\n", keylen);
#endif

        if (keylen > blocksize) {
                hashed_key = kmalloc_array(digestsize,
                                           sizeof(*hashed_key),
                                           GFP_KERNEL | GFP_DMA);
                if (!hashed_key)
                        return -ENOMEM;
                ret = hash_digest_key(ctx, key, &keylen, hashed_key,
                                      digestsize);
                if (ret)
                        goto bad_free_key;
                key = hashed_key;
        }

        /*
         * If DKP is supported, use it in the shared descriptor to generate
         * the split key.
         */
        if (ctrlpriv->era >= 6) {
                ctx->adata.key_inline = true;
                ctx->adata.keylen = keylen;
                ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
                                                      OP_ALG_ALGSEL_MASK);

                if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
                        goto bad_free_key;

                memcpy(ctx->key, key, keylen);
        } else {
                ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
                                    keylen, CAAM_MAX_HASH_KEY_SIZE);
                if (ret)
                        goto bad_free_key;
        }

        kfree(hashed_key);
        return ahash_set_sh_desc(ahash);
 bad_free_key:
        kfree(hashed_key);
        crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}
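
/*
 * Example: for hmac(sha256) (64-byte block, 32-byte digest) a 100-byte
 * key exceeds the block size, so ahash_setkey() first runs it through
 * hash_digest_key() and continues with the 32-byte digest, matching the
 * standard HMAC rule for over-long keys.
 */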

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
        dma_addr_t sec4_sg_dma;
        int src_nents;
        int sec4_sg_bytes;
        u32 hw_desc[DESC_JOB_IO_LEN / sizeof(u32)] ____cacheline_aligned;
        struct sec4_sg_entry sec4_sg[];
};

static inline void ahash_unmap(struct device *dev,
                        struct ahash_edesc *edesc,
                        struct ahash_request *req, int dst_len)
{
        struct caam_hash_state *state = ahash_request_ctx(req);

        if (edesc->src_nents)
                dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

        if (edesc->sec4_sg_bytes)
                dma_unmap_single(dev, edesc->sec4_sg_dma,
                                 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

        if (state->buf_dma) {
                dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
                                 DMA_TO_DEVICE);
                state->buf_dma = 0;
        }
}

static inline void ahash_unmap_ctx(struct device *dev,
                        struct ahash_edesc *edesc,
                        struct ahash_request *req, int dst_len, u32 flag)
{
        struct caam_hash_state *state = ahash_request_ctx(req);

        if (state->ctx_dma) {
                dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
                state->ctx_dma = 0;
        }
        ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
                       void *context)
{
        struct ahash_request *req = context;
        struct ahash_edesc *edesc;
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

        edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
        if (err)
                caam_jr_strstatus(jrdev, err);

        ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
        memcpy(req->result, state->caam_ctx, digestsize);
        kfree(edesc);

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                       ctx->ctx_len, 1);
#endif

        req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
                          void *context)
{
        struct ahash_request *req = context;
        struct ahash_edesc *edesc;
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
        int digestsize = crypto_ahash_digestsize(ahash);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

        edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
        if (err)
                caam_jr_strstatus(jrdev, err);

        ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
        switch_buf(state);
        kfree(edesc);

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                       ctx->ctx_len, 1);
        if (req->result)
                print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                               digestsize, 1);
#endif

        req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
                               void *context)
{
        struct ahash_request *req = context;
        struct ahash_edesc *edesc;
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

        edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
        if (err)
                caam_jr_strstatus(jrdev, err);

        ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
        memcpy(req->result, state->caam_ctx, digestsize);
        kfree(edesc);

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                       ctx->ctx_len, 1);
#endif

        req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
                               void *context)
{
        struct ahash_request *req = context;
        struct ahash_edesc *edesc;
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
#ifdef DEBUG
        int digestsize = crypto_ahash_digestsize(ahash);

        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

        edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
        if (err)
                caam_jr_strstatus(jrdev, err);

        ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
        switch_buf(state);
        kfree(edesc);

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                       ctx->ctx_len, 1);
        if (req->result)
                print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                               digestsize, 1);
#endif

        req->base.complete(&req->base, err);
}

/*
 * Allocate an extended descriptor, which contains the hardware job
 * descriptor and space for a hardware scatter table with sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
                                             int sg_num, u32 *sh_desc,
                                             dma_addr_t sh_desc_dma,
                                             gfp_t flags)
{
        struct ahash_edesc *edesc;
        unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

        edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
        if (!edesc) {
                dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
                return NULL;
        }

        init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
                             HDR_SHARE_DEFER | HDR_REVERSE);

        return edesc;
}
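
/*
 * A note on the header flags above: HDR_REVERSE makes CAAM execute the
 * job descriptor's commands before fetching the shared descriptor, so
 * the SEQ IN/OUT pointers appended later are already in place when the
 * shared descriptor runs; HDR_SHARE_DEFER defers acquiring the shared
 * descriptor accordingly.
 */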

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
                               struct ahash_edesc *edesc,
                               struct ahash_request *req, int nents,
                               unsigned int first_sg,
                               unsigned int first_bytes, size_t to_hash)
{
        dma_addr_t src_dma;
        u32 options;

        if (nents > 1 || first_sg) {
                struct sec4_sg_entry *sg = edesc->sec4_sg;
                unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

                sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);

                src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
                if (dma_mapping_error(ctx->jrdev, src_dma)) {
                        dev_err(ctx->jrdev, "unable to map S/G table\n");
                        return -ENOMEM;
                }

                edesc->sec4_sg_bytes = sgsize;
                edesc->sec4_sg_dma = src_dma;
                options = LDST_SGF;
        } else {
                src_dma = sg_dma_address(req->src);
                options = 0;
        }

        append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
                          options);

        return 0;
}
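
/*
 * Callers pass first_sg link-table entries they have already filled in
 * ahead of the source scatterlist, plus first_bytes for the data those
 * entries contribute; e.g. ahash_finup_ctx() reserves entries for the
 * running context and the bounce buffer and passes ctx->ctx_len + buflen
 * as first_bytes.
 */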

/* submit update job descriptor */
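/*
 * Worked example (64-byte block size): with 10 bytes already buffered
 * and a 110-byte update, in_len = 120, *next_buflen = 120 & 63 = 56 and
 * to_hash = 64, so exactly one block goes to CAAM while the trailing 56
 * bytes are copied into the alternate buffer for a later job.
 */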
static int ahash_update_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        u8 *buf = current_buf(state);
        int *buflen = current_buflen(state);
        u8 *next_buf = alt_buf(state);
        int *next_buflen = alt_buflen(state), last_buflen;
        int in_len = *buflen + req->nbytes, to_hash;
        u32 *desc;
        int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
        struct ahash_edesc *edesc;
        int ret = 0;

        last_buflen = *next_buflen;
        *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
        to_hash = in_len - *next_buflen;

        if (to_hash) {
                src_nents = sg_nents_for_len(req->src,
                                             req->nbytes - (*next_buflen));
                if (src_nents < 0) {
                        dev_err(jrdev, "Invalid number of src SG.\n");
                        return src_nents;
                }

                if (src_nents) {
                        mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                                                  DMA_TO_DEVICE);
                        if (!mapped_nents) {
                                dev_err(jrdev, "unable to DMA map source\n");
                                return -ENOMEM;
                        }
                } else {
                        mapped_nents = 0;
                }

                sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
                sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
                                 sizeof(struct sec4_sg_entry);

                /*
                 * allocate space for base edesc and hw desc commands,
                 * link tables
                 */
                edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
                                          ctx->sh_desc_update,
                                          ctx->sh_desc_update_dma, flags);
                if (!edesc) {
                        dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                        return -ENOMEM;
                }

                edesc->src_nents = src_nents;
                edesc->sec4_sg_bytes = sec4_sg_bytes;

                ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
                                         edesc->sec4_sg, DMA_BIDIRECTIONAL);
                if (ret)
                        goto unmap_ctx;

                ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
                if (ret)
                        goto unmap_ctx;

                if (mapped_nents) {
                        sg_to_sec4_sg_last(req->src, mapped_nents,
                                           edesc->sec4_sg + sec4_sg_src_index,
                                           0);
                        if (*next_buflen)
                                scatterwalk_map_and_copy(next_buf, req->src,
                                                         to_hash - *buflen,
                                                         *next_buflen, 0);
                } else {
                        sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
                                            1);
                }

                desc = edesc->hw_desc;

                edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                                    sec4_sg_bytes,
                                                    DMA_TO_DEVICE);
                if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                        dev_err(jrdev, "unable to map S/G table\n");
                        ret = -ENOMEM;
                        goto unmap_ctx;
                }

                append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
                                       to_hash, LDST_SGF);

                append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
                print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, desc,
                               desc_bytes(desc), 1);
#endif

                ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
                if (ret)
                        goto unmap_ctx;

                ret = -EINPROGRESS;
        } else if (*next_buflen) {
                scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
                                         req->nbytes, 0);
                *buflen = *next_buflen;
                *next_buflen = last_buflen;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
        print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
                       *next_buflen, 1);
#endif

        return ret;
 unmap_ctx:
        ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
        kfree(edesc);
        return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        int buflen = *current_buflen(state);
        u32 *desc;
        int sec4_sg_bytes, sec4_sg_src_index;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret;

        sec4_sg_src_index = 1 + (buflen ? 1 : 0);
        sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index,
                                  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
                                  flags);
        if (!edesc)
                return -ENOMEM;

        desc = edesc->hw_desc;

        edesc->sec4_sg_bytes = sec4_sg_bytes;

        ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
                                 edesc->sec4_sg, DMA_BIDIRECTIONAL);
        if (ret)
                goto unmap_ctx;

        ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
        if (ret)
                goto unmap_ctx;

        sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);

        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                dev_err(jrdev, "unable to map S/G table\n");
                ret = -ENOMEM;
                goto unmap_ctx;
        }

        append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
                          LDST_SGF);
        append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

        ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
        if (ret)
                goto unmap_ctx;

        return -EINPROGRESS;
 unmap_ctx:
        ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
        kfree(edesc);
        return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        int buflen = *current_buflen(state);
        u32 *desc;
        int sec4_sg_src_index;
        int src_nents, mapped_nents;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret;

        src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (src_nents < 0) {
                dev_err(jrdev, "Invalid number of src SG.\n");
                return src_nents;
        }

        if (src_nents) {
                mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                                          DMA_TO_DEVICE);
                if (!mapped_nents) {
                        dev_err(jrdev, "unable to DMA map source\n");
                        return -ENOMEM;
                }
        } else {
                mapped_nents = 0;
        }

        sec4_sg_src_index = 1 + (buflen ? 1 : 0);

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
                                  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
                                  flags);
        if (!edesc) {
                dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        desc = edesc->hw_desc;

        edesc->src_nents = src_nents;

        ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
                                 edesc->sec4_sg, DMA_BIDIRECTIONAL);
        if (ret)
                goto unmap_ctx;

        ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
        if (ret)
                goto unmap_ctx;

        ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
                                  sec4_sg_src_index, ctx->ctx_len + buflen,
                                  req->nbytes);
        if (ret)
                goto unmap_ctx;

        append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

        ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
        if (ret)
                goto unmap_ctx;

        return -EINPROGRESS;
 unmap_ctx:
        ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
        kfree(edesc);
        return ret;
}

static int ahash_digest(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        u32 *desc;
        int digestsize = crypto_ahash_digestsize(ahash);
        int src_nents, mapped_nents;
        struct ahash_edesc *edesc;
        int ret;

        state->buf_dma = 0;

        src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (src_nents < 0) {
                dev_err(jrdev, "Invalid number of src SG.\n");
                return src_nents;
        }

        if (src_nents) {
                mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                                          DMA_TO_DEVICE);
                if (!mapped_nents) {
                        dev_err(jrdev, "unable to map source for DMA\n");
                        return -ENOMEM;
                }
        } else {
                mapped_nents = 0;
        }

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
                                  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
                                  flags);
        if (!edesc) {
                dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        edesc->src_nents = src_nents;

        ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
                                  req->nbytes);
        if (ret) {
                ahash_unmap(jrdev, edesc, req, digestsize);
                kfree(edesc);
                return ret;
        }

        desc = edesc->hw_desc;

        ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
        if (ret) {
                ahash_unmap(jrdev, edesc, req, digestsize);
                kfree(edesc);
                return -ENOMEM;
        }

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

        ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
        if (!ret) {
                ret = -EINPROGRESS;
        } else {
                ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
                kfree(edesc);
        }

        return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        u8 *buf = current_buf(state);
        int buflen = *current_buflen(state);
        u32 *desc;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret;

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
                                  ctx->sh_desc_digest_dma, flags);
        if (!edesc)
                return -ENOMEM;

        desc = edesc->hw_desc;

        if (buflen) {
                state->buf_dma = dma_map_single(jrdev, buf, buflen,
                                                DMA_TO_DEVICE);
                if (dma_mapping_error(jrdev, state->buf_dma)) {
                        dev_err(jrdev, "unable to map src\n");
                        goto unmap;
                }

                append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
        }

        ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
        if (ret)
                goto unmap;

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

        ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
        if (!ret) {
                ret = -EINPROGRESS;
        } else {
                ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
                kfree(edesc);
        }

        return ret;
 unmap:
        ahash_unmap(jrdev, edesc, req, digestsize);
        kfree(edesc);
        return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        u8 *buf = current_buf(state);
        int *buflen = current_buflen(state);
        u8 *next_buf = alt_buf(state);
        int *next_buflen = alt_buflen(state);
        int in_len = *buflen + req->nbytes, to_hash;
        int sec4_sg_bytes, src_nents, mapped_nents;
        struct ahash_edesc *edesc;
        u32 *desc;
        int ret = 0;

        *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
        to_hash = in_len - *next_buflen;

        if (to_hash) {
                src_nents = sg_nents_for_len(req->src,
                                             req->nbytes - *next_buflen);
                if (src_nents < 0) {
                        dev_err(jrdev, "Invalid number of src SG.\n");
                        return src_nents;
                }

                if (src_nents) {
                        mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                                                  DMA_TO_DEVICE);
                        if (!mapped_nents) {
                                dev_err(jrdev, "unable to DMA map source\n");
                                return -ENOMEM;
                        }
                } else {
                        mapped_nents = 0;
                }

                sec4_sg_bytes = (1 + mapped_nents) *
                                sizeof(struct sec4_sg_entry);

                /*
                 * allocate space for base edesc and hw desc commands,
                 * link tables
                 */
                edesc = ahash_edesc_alloc(ctx, 1 + mapped_nents,
                                          ctx->sh_desc_update_first,
                                          ctx->sh_desc_update_first_dma,
                                          flags);
                if (!edesc) {
                        dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                        return -ENOMEM;
                }

                edesc->src_nents = src_nents;
                edesc->sec4_sg_bytes = sec4_sg_bytes;

                ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
                if (ret)
                        goto unmap_ctx;

                sg_to_sec4_sg_last(req->src, mapped_nents,
                                   edesc->sec4_sg + 1, 0);

                if (*next_buflen) {
                        scatterwalk_map_and_copy(next_buf, req->src,
                                                 to_hash - *buflen,
                                                 *next_buflen, 0);
                }

                desc = edesc->hw_desc;

                edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                                    sec4_sg_bytes,
                                                    DMA_TO_DEVICE);
                if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                        dev_err(jrdev, "unable to map S/G table\n");
                        ret = -ENOMEM;
                        goto unmap_ctx;
                }

                append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

                ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
                if (ret)
                        goto unmap_ctx;

#ifdef DEBUG
                print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                               DUMP_PREFIX_ADDRESS, 16, 4, desc,
                               desc_bytes(desc), 1);
#endif

                ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
                if (ret)
                        goto unmap_ctx;

                ret = -EINPROGRESS;
                state->update = ahash_update_ctx;
                state->finup = ahash_finup_ctx;
                state->final = ahash_final_ctx;
        } else if (*next_buflen) {
                scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
                                         req->nbytes, 0);
                *buflen = *next_buflen;
                *next_buflen = 0;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
        print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
                       *next_buflen, 1);
#endif

        return ret;
 unmap_ctx:
        ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
        kfree(edesc);
        return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        int buflen = *current_buflen(state);
        u32 *desc;
        int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret;

        src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (src_nents < 0) {
                dev_err(jrdev, "Invalid number of src SG.\n");
                return src_nents;
        }

        if (src_nents) {
                mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                                          DMA_TO_DEVICE);
                if (!mapped_nents) {
                        dev_err(jrdev, "unable to DMA map source\n");
                        return -ENOMEM;
                }
        } else {
                mapped_nents = 0;
        }

        sec4_sg_src_index = 2;
        sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
                         sizeof(struct sec4_sg_entry);

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
                                  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
                                  flags);
        if (!edesc) {
                dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        desc = edesc->hw_desc;

        edesc->src_nents = src_nents;
        edesc->sec4_sg_bytes = sec4_sg_bytes;

        ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
        if (ret)
                goto unmap;

        ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
                                  req->nbytes);
        if (ret) {
                dev_err(jrdev, "unable to map S/G table\n");
                goto unmap;
        }

        ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
        if (ret)
                goto unmap;

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

        ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
        if (!ret) {
                ret = -EINPROGRESS;
        } else {
                ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
                kfree(edesc);
        }

        return ret;
 unmap:
        ahash_unmap(jrdev, edesc, req, digestsize);
        kfree(edesc);
        return -ENOMEM;
}

1346 /* submit first update job descriptor after init */
1347 static int ahash_update_first(struct ahash_request *req)
1348 {
1349         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1350         struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1351         struct caam_hash_state *state = ahash_request_ctx(req);
1352         struct device *jrdev = ctx->jrdev;
1353         gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1354                        GFP_KERNEL : GFP_ATOMIC;
1355         u8 *next_buf = alt_buf(state);
1356         int *next_buflen = alt_buflen(state);
1357         int to_hash;
1358         u32 *desc;
1359         int src_nents, mapped_nents;
1360         struct ahash_edesc *edesc;
1361         int ret = 0;
1362
	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma,
					  flags);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		if (*next_buflen)
			scatterwalk_map_and_copy(next_buf, req->src, to_hash,
						 *next_buflen, 0);

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (ret)
			goto unmap_ctx;

		/*
		 * First block is in flight; from now on the running context
		 * lives in CAAM memory, so switch to the context-aware
		 * handlers for subsequent update/finup/final calls.
		 */
		ret = -EINPROGRESS;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		/* Less than one block of data: just backlog it. */
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(next_buf, req->src, 0,
					 req->nbytes, 0);
		switch_buf(state);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/*
 * With no data hashed yet there is no running context to fold in, so a
 * finup on fresh state degenerates to a one-shot digest of the request.
 */
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->current_buf = 0;
	state->buf_dma = 0;
	state->buflen_0 = 0;
	state->buflen_1 = 0;

	return 0;
}
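
/*
 * Illustrative only, not part of the driver: a kernel caller drives the
 * state machine set up above through the generic ahash API. Assuming the
 * sha256-caam algorithm registered below, and with my_done_cb/my_ctx as
 * caller-supplied completion hooks, a one-shot digest looks roughly like:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256-caam", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	err = crypto_ahash_digest(req);
 *
 * err is -EINPROGRESS once the job is queued. crypto_ahash_init(),
 * crypto_ahash_update() and crypto_ahash_final() map onto ahash_init()
 * and the state->update/finup/final hooks via the trampolines below;
 * error handling and cleanup (ahash_request_free(), crypto_free_ahash())
 * are elided.
 */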

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_export_state *export = out;
	int len;
	u8 *buf;

	if (state->current_buf) {
		buf = state->buf_1;
		len = state->buflen_1;
	} else {
		buf = state->buf_0;
		len = state->buflen_0;
	}

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf_0, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen_0 = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
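
/*
 * export/import serialize only the software-visible partial state (the
 * pending sub-block buffer, the running CAAM context and the stage
 * hooks), so an in-progress hash can be suspended and later resumed on
 * another request. Illustrative only, with a caller-provided buffer:
 *
 *	u8 st[sizeof(struct caam_export_state)];
 *
 *	crypto_ahash_export(req, st);
 *	...
 *	crypto_ahash_import(req2, st);
 *
 * statesize is set to sizeof(struct caam_export_state) in the templates
 * below precisely so that generic callers can size this buffer.
 */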

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

/* ahash algorithm templates */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	},
};
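
/*
 * Each template above is registered twice by caam_algapi_hash_init():
 * once as the keyed hmac(...) variant and once as the plain hash, with
 * .setkey cleared for the unkeyed version (see caam_hash_alloc()).
 */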

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/*
	 * Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512.
	 * SHA-224 and SHA-384 are truncated variants, so their running
	 * digests are SHA-256-sized (32) and SHA-512-sized (64) bytes
	 * respectively.
	 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);
	ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

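	/*
	 * The four shared descriptors are contiguous members of
	 * struct caam_hash_ctx, so a single mapping of everything up to
	 * the sh_desc_update_dma member covers them all; the
	 * per-descriptor DMA addresses below are just offsets into that
	 * one region.
	 */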
	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first);
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin);
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));
	return ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx,
					sh_desc_update_dma),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

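	/*
	 * A keyed instance advertises the hmac(...) names; the unkeyed one
	 * additionally clears .setkey so the crypto core treats it as an
	 * ordinary unkeyed hash.
	 */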
	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;

	return t_alg;
}

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	int i = 0, err = 0;
	struct caam_drv_private *priv;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 cha_inst, cha_vid;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/*
	 * Register crypto algorithms the device supports.  First, identify
	 * presence and attributes of MD block.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
		return -ENODEV;

	/*
	 * Limit digest size based on LP256: the low-power MD block only
	 * supports up to SHA-256, so larger digests are skipped in the
	 * registration loop below.
	 */
	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
	}

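	/*
	 * err holds the result of the last registration attempt; algorithms
	 * already on hash_list stay registered and are torn down in
	 * caam_algapi_hash_exit().
	 */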
	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");