GNU Linux-libre 4.14.266-gnu1
[releases.git] / drivers / staging / ccree / ssi_aead.c
1 /*
2  * Copyright (C) 2012-2017 ARM Limited or its affiliates.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, see <http://www.gnu.org/licenses/>.
15  */
16
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/platform_device.h>
20 #include <crypto/algapi.h>
21 #include <crypto/internal/skcipher.h>
22 #include <crypto/internal/hash.h>
23 #include <crypto/internal/aead.h>
24 #include <crypto/sha.h>
25 #include <crypto/ctr.h>
26 #include <crypto/authenc.h>
27 #include <crypto/aes.h>
28 #include <crypto/des.h>
29 #include <linux/rtnetlink.h>
30 #include <linux/version.h>
31 #include "ssi_config.h"
32 #include "ssi_driver.h"
33 #include "ssi_buffer_mgr.h"
34 #include "ssi_aead.h"
35 #include "ssi_request_mgr.h"
36 #include "ssi_hash.h"
37 #include "ssi_sysfs.h"
38 #include "ssi_sram_mgr.h"
39
40 #define template_aead   template_u.aead
41
42 #define MAX_AEAD_SETKEY_SEQ 12
43 #define MAX_AEAD_PROCESS_SEQ 23
44
45 #define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
46 #define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
47
48 #define AES_CCM_RFC4309_NONCE_SIZE 3
49 #define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
50
51 /* Value of each ICV_CMP byte (of 8) in case of success */
52 #define ICV_VERIF_OK 0x01
53
54 struct ssi_aead_handle {
55         ssi_sram_addr_t sram_workspace_addr;
56         struct list_head aead_list;
57 };
58
59 struct cc_hmac_s {
60         u8 *padded_authkey;
61         u8 *ipad_opad; /* IPAD, OPAD*/
62         dma_addr_t padded_authkey_dma_addr;
63         dma_addr_t ipad_opad_dma_addr;
64 };
65
66 struct cc_xcbc_s {
67         u8 *xcbc_keys; /* K1,K2,K3 */
68         dma_addr_t xcbc_keys_dma_addr;
69 };
70
71 struct ssi_aead_ctx {
72         struct ssi_drvdata *drvdata;
73         u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
74         u8 *enckey;
75         dma_addr_t enckey_dma_addr;
76         union {
77                 struct cc_hmac_s hmac;
78                 struct cc_xcbc_s xcbc;
79         } auth_state;
80         unsigned int enc_keylen;
81         unsigned int auth_keylen;
82         unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */
83         enum drv_cipher_mode cipher_mode;
84         enum cc_flow_mode flow_mode;
85         enum drv_hash_mode auth_mode;
86 };
87
88 static inline bool valid_assoclen(struct aead_request *req)
89 {
90         return ((req->assoclen == 16) || (req->assoclen == 20));
91 }
92
93 static void ssi_aead_exit(struct crypto_aead *tfm)
94 {
95         struct device *dev = NULL;
96         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
97
98         SSI_LOG_DEBUG("Clearing context @%p for %s\n",
99                       crypto_aead_ctx(tfm), crypto_tfm_alg_name(&tfm->base));
100
101         dev = &ctx->drvdata->plat_dev->dev;
102         /* Unmap enckey buffer */
103         if (ctx->enckey) {
104                 dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey, ctx->enckey_dma_addr);
105                 SSI_LOG_DEBUG("Freed enckey DMA buffer enckey_dma_addr=%pad\n",
106                               ctx->enckey_dma_addr);
107                 ctx->enckey_dma_addr = 0;
108                 ctx->enckey = NULL;
109         }
110
111         if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
112                 struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
113
114                 if (xcbc->xcbc_keys) {
115                         dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
116                                           xcbc->xcbc_keys,
117                                           xcbc->xcbc_keys_dma_addr);
118                 }
119                 SSI_LOG_DEBUG("Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
120                               xcbc->xcbc_keys_dma_addr);
121                 xcbc->xcbc_keys_dma_addr = 0;
122                 xcbc->xcbc_keys = NULL;
123         } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
124                 struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
125
126                 if (hmac->ipad_opad) {
127                         dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
128                                           hmac->ipad_opad,
129                                           hmac->ipad_opad_dma_addr);
130                         SSI_LOG_DEBUG("Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
131                                       hmac->ipad_opad_dma_addr);
132                         hmac->ipad_opad_dma_addr = 0;
133                         hmac->ipad_opad = NULL;
134                 }
135                 if (hmac->padded_authkey) {
136                         dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
137                                           hmac->padded_authkey,
138                                           hmac->padded_authkey_dma_addr);
139                         SSI_LOG_DEBUG("Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
140                                       hmac->padded_authkey_dma_addr);
141                         hmac->padded_authkey_dma_addr = 0;
142                         hmac->padded_authkey = NULL;
143                 }
144         }
145 }
146
/*
 * crypto_aead init callback: resolve algorithm parameters from the
 * enclosing ssi_crypto_alg and allocate the per-transform DMA-coherent
 * key buffers needed by the selected authentication mode.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure.  On failure
 * ssi_aead_exit() is invoked to release whatever was already allocated.
 */
static int ssi_aead_init(struct crypto_aead *tfm)
{
	struct device *dev;
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct ssi_crypto_alg *ssi_alg =
			container_of(alg, struct ssi_crypto_alg, aead_alg);
	SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx, crypto_tfm_alg_name(&tfm->base));

	/* Initialize modes in instance */
	ctx->cipher_mode = ssi_alg->cipher_mode;
	ctx->flow_mode = ssi_alg->flow_mode;
	ctx->auth_mode = ssi_alg->auth_mode;
	ctx->drvdata = ssi_alg->drvdata;
	dev = &ctx->drvdata->plat_dev->dev;
	/* Per-request context lives in the aead_request tail */
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));

	/* Allocate key buffer, cache line aligned */
	ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
					 &ctx->enckey_dma_addr, GFP_KERNEL);
	if (!ctx->enckey) {
		SSI_LOG_ERR("Failed allocating key buffer\n");
		goto init_failed;
	}
	SSI_LOG_DEBUG("Allocated enckey buffer in context ctx->enckey=@%p\n", ctx->enckey);

	/* Set default authlen value */

	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
		const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;

		/* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
		/* (and temporary for user key - up to 256b) */
		xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
						     &xcbc->xcbc_keys_dma_addr,
						     GFP_KERNEL);
		if (!xcbc->xcbc_keys) {
			SSI_LOG_ERR("Failed allocating buffer for XCBC keys\n");
			goto init_failed;
		}
	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
		const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
		dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;

		/* Allocate dma-coherent buffer for IPAD + OPAD */
		hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
						     &hmac->ipad_opad_dma_addr,
						     GFP_KERNEL);

		if (!hmac->ipad_opad) {
			SSI_LOG_ERR("Failed allocating IPAD/OPAD buffer\n");
			goto init_failed;
		}

		SSI_LOG_DEBUG("Allocated authkey buffer in context ctx->authkey=@%p\n",
			      hmac->ipad_opad);

		/* Holds the user auth key zero-padded to the hash block size
		 * (filled in by ssi_get_plain_hmac_key() at setkey time).
		 */
		hmac->padded_authkey = dma_alloc_coherent(dev,
							  MAX_HMAC_BLOCK_SIZE,
							  pkey_dma,
							  GFP_KERNEL);

		if (!hmac->padded_authkey) {
			SSI_LOG_ERR("failed to allocate padded_authkey\n");
			goto init_failed;
		}
	} else {
		/* No-auth modes (e.g. CCM/GCM): nothing extra to allocate */
		ctx->auth_state.hmac.ipad_opad = NULL;
		ctx->auth_state.hmac.padded_authkey = NULL;
	}

	return 0;

init_failed:
	/* ssi_aead_exit() tolerates partially-populated contexts */
	ssi_aead_exit(tfm);
	return -ENOMEM;
}
226
227 static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
228 {
229         struct aead_request *areq = (struct aead_request *)ssi_req;
230         struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
231         struct crypto_aead *tfm = crypto_aead_reqtfm(ssi_req);
232         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
233         int err = 0;
234
235         ssi_buffer_mgr_unmap_aead_request(dev, areq);
236
237         /* Restore ordinary iv pointer */
238         areq->iv = areq_ctx->backup_iv;
239
240         if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
241                 if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
242                            ctx->authsize) != 0) {
243                         SSI_LOG_DEBUG("Payload authentication failure, "
244                                 "(auth-size=%d, cipher=%d).\n",
245                                 ctx->authsize, ctx->cipher_mode);
246                         /* In case of payload authentication failure, MUST NOT
247                          * revealed the decrypted message --> zero its memory.
248                          */
249                         ssi_buffer_mgr_zero_sgl(areq->dst, areq_ctx->cryptlen);
250                         err = -EBADMSG;
251                 }
252         } else { /*ENCRYPT*/
253                 if (unlikely(areq_ctx->is_icv_fragmented))
254                         ssi_buffer_mgr_copy_scatterlist_portion(
255                                 areq_ctx->mac_buf, areq_ctx->dst_sgl, areq->cryptlen + areq_ctx->dst_offset,
256                                 areq->cryptlen + areq_ctx->dst_offset + ctx->authsize, SSI_SG_FROM_BUF);
257
258                 /* If an IV was generated, copy it back to the user provided buffer. */
259                 if (areq_ctx->backup_giv) {
260                         if (ctx->cipher_mode == DRV_CIPHER_CTR)
261                                 memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_IV_SIZE);
262                         else if (ctx->cipher_mode == DRV_CIPHER_CCM)
263                                 memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
264                 }
265         }
266
267         aead_request_complete(areq, err);
268 }
269
270 static int xcbc_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
271 {
272         /* Load the AES key */
273         hw_desc_init(&desc[0]);
274         /* We are using for the source/user key the same buffer as for the output keys,
275          * because after this key loading it is not needed anymore
276          */
277         set_din_type(&desc[0], DMA_DLLI,
278                      ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
279                      NS_BIT);
280         set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
281         set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
282         set_key_size_aes(&desc[0], ctx->auth_keylen);
283         set_flow_mode(&desc[0], S_DIN_to_AES);
284         set_setup_mode(&desc[0], SETUP_LOAD_KEY0);
285
286         hw_desc_init(&desc[1]);
287         set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
288         set_flow_mode(&desc[1], DIN_AES_DOUT);
289         set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
290                       AES_KEYSIZE_128, NS_BIT, 0);
291
292         hw_desc_init(&desc[2]);
293         set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
294         set_flow_mode(&desc[2], DIN_AES_DOUT);
295         set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
296                                          + AES_KEYSIZE_128),
297                               AES_KEYSIZE_128, NS_BIT, 0);
298
299         hw_desc_init(&desc[3]);
300         set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
301         set_flow_mode(&desc[3], DIN_AES_DOUT);
302         set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
303                                           + 2 * AES_KEYSIZE_128),
304                               AES_KEYSIZE_128, NS_BIT, 0);
305
306         return 4;
307 }
308
/*
 * Build the HW descriptor sequence that computes the HMAC ipad and
 * opad intermediate digests from the padded auth key and stores them
 * back-to-back in the ipad_opad DMA buffer.  Returns the number of
 * descriptors written (5 per pad, 10 total).
 */
static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
{
	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
	unsigned int digest_ofs = 0;
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
			DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
			CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
	struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

	int idx = 0;
	int i;

	/* calc derived HMAC key: pass 0 = ipad, pass 1 = opad */
	for (i = 0; i < 2; i++) {
		/* Load hash initial state (larval digest from SRAM) */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_din_sram(&desc[idx],
			     ssi_ahash_get_larval_digest_sram_addr(
				ctx->drvdata, ctx->auth_mode),
			     digest_size);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length (zero for a fresh hash) */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Prepare pad: XOR the key block with the ipad/opad constant */
		hw_desc_init(&desc[idx]);
		set_xor_val(&desc[idx], hmac_pad_const[i]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		idx++;

		/* Perform HASH update over the padded (XORed) auth key */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     hmac->padded_authkey_dma_addr,
			     SHA256_BLOCK_SIZE, NS_BIT);
		set_cipher_mode(&desc[idx], hash_mode);
		set_xor_active(&desc[idx]);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;

		/* Get the digest: write the intermediate state (padding
		 * disabled) into its half of the ipad_opad buffer.
		 */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_dout_dlli(&desc[idx],
			      (hmac->ipad_opad_dma_addr + digest_ofs),
			      digest_size, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
		idx++;

		digest_ofs += digest_size;
	}

	return idx;
}
377
378 static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
379 {
380         SSI_LOG_DEBUG("enc_keylen=%u  authkeylen=%u\n",
381                       ctx->enc_keylen, ctx->auth_keylen);
382
383         switch (ctx->auth_mode) {
384         case DRV_HASH_SHA1:
385         case DRV_HASH_SHA256:
386                 break;
387         case DRV_HASH_XCBC_MAC:
388                 if ((ctx->auth_keylen != AES_KEYSIZE_128) &&
389                     (ctx->auth_keylen != AES_KEYSIZE_192) &&
390                     (ctx->auth_keylen != AES_KEYSIZE_256))
391                         return -ENOTSUPP;
392                 break;
393         case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key) */
394                 if (ctx->auth_keylen > 0)
395                         return -EINVAL;
396                 break;
397         default:
398                 SSI_LOG_ERR("Invalid auth_mode=%d\n", ctx->auth_mode);
399                 return -EINVAL;
400         }
401         /* Check cipher key size */
402         if (unlikely(ctx->flow_mode == S_DIN_to_DES)) {
403                 if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
404                         SSI_LOG_ERR("Invalid cipher(3DES) key size: %u\n",
405                                     ctx->enc_keylen);
406                         return -EINVAL;
407                 }
408         } else { /* Default assumed to be AES ciphers */
409                 if ((ctx->enc_keylen != AES_KEYSIZE_128) &&
410                     (ctx->enc_keylen != AES_KEYSIZE_192) &&
411                     (ctx->enc_keylen != AES_KEYSIZE_256)) {
412                         SSI_LOG_ERR("Invalid cipher(AES) key size: %u\n",
413                                     ctx->enc_keylen);
414                         return -EINVAL;
415                 }
416         }
417
418         return 0; /* All tests of keys sizes passed */
419 }
420
/* This function prepares the user key so it can pass to the hmac
 * processing: copy it into the internal padded_authkey buffer (zero
 * padded to the hash block size), hashing it first if it is longer
 * than the hash block size.
 */
static int
ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
	dma_addr_t key_dma_addr = 0;
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = &ctx->drvdata->plat_dev->dev;
	u32 larval_addr = ssi_ahash_get_larval_digest_sram_addr(
					ctx->drvdata, ctx->auth_mode);
	struct ssi_crypto_req ssi_req = {};
	unsigned int blocksize;
	unsigned int digestsize;
	unsigned int hashmode;
	unsigned int idx = 0;
	int rc = 0;
	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
	dma_addr_t padded_authkey_dma_addr =
		ctx->auth_state.hmac.padded_authkey_dma_addr;

	/* Select hash parameters by auth mode; SHA-256 is the default */
	switch (ctx->auth_mode) { /* auth_key required and >0 */
	case DRV_HASH_SHA1:
		blocksize = SHA1_BLOCK_SIZE;
		digestsize = SHA1_DIGEST_SIZE;
		hashmode = DRV_HASH_HW_SHA1;
		break;
	case DRV_HASH_SHA256:
	default:
		blocksize = SHA256_BLOCK_SIZE;
		digestsize = SHA256_DIGEST_SIZE;
		hashmode = DRV_HASH_HW_SHA256;
	}

	if (likely(keylen != 0)) {
		/* Map the caller's key for device reads */
		key_dma_addr = dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, key_dma_addr))) {
			SSI_LOG_ERR("Mapping key va=0x%p len=%u for"
				   " DMA failed\n", key, keylen);
			return -ENOMEM;
		}
		if (keylen > blocksize) {
			/* Key longer than a block: hash it down to
			 * digestsize bytes first (standard HMAC rule).
			 */
			/* Load hash initial state */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], hashmode);
			set_din_sram(&desc[idx], larval_addr, digestsize);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
			idx++;

			/* Load the hash current length*/
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], hashmode);
			set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
			idx++;

			/* Hash the full user key */
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     key_dma_addr, keylen, NS_BIT);
			set_flow_mode(&desc[idx], DIN_HASH);
			idx++;

			/* Get hashed key */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], hashmode);
			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
				      digestsize, NS_BIT, 0);
			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
			set_cipher_config0(&desc[idx],
					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
			idx++;

			/* Zero-pad the remainder of the block */
			hw_desc_init(&desc[idx]);
			set_din_const(&desc[idx], 0, (blocksize - digestsize));
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
				      digestsize), (blocksize - digestsize),
				      NS_BIT, 0);
			idx++;
		} else {
			/* Key fits in a block: copy it as-is ... */
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
				     keylen, NS_BIT);
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
				      keylen, NS_BIT, 0);
			idx++;

			/* ... and zero-pad the remainder, if any */
			if ((blocksize - keylen) != 0) {
				hw_desc_init(&desc[idx]);
				set_din_const(&desc[idx], 0,
					      (blocksize - keylen));
				set_flow_mode(&desc[idx], BYPASS);
				set_dout_dlli(&desc[idx],
					      (padded_authkey_dma_addr +
					       keylen),
					      (blocksize - keylen), NS_BIT, 0);
				idx++;
			}
		}
	} else {
		/* Empty key: the padded key is a full block of zeros
		 * (keylen is 0 here, so the constant length is blocksize).
		 */
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0, (blocksize - keylen));
		set_flow_mode(&desc[idx], BYPASS);
		set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
			      blocksize, NS_BIT, 0);
		idx++;
	}

	/* Synchronously run the built sequence on the HW queue */
	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
	if (unlikely(rc != 0))
		SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);

	if (likely(key_dma_addr != 0))
		dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);

	return rc;
}
544
/*
 * crypto_aead setkey callback.  For authenc() algorithms the key blob
 * is an rtattr-framed {enckeylen param, auth key, enc key} as defined
 * by crypto_authenc_key_param; for non-authenc modes (CCM) it is a
 * plain cipher key.  Splits the blob, validates the sizes, stages the
 * key material into the context's DMA buffers and runs the HW key
 * derivation sequence (HMAC ipad/opad or XCBC K1/K2/K3).
 * Returns 0 on success, negative errno on failure; on a bad key the
 * CRYPTO_TFM_RES_BAD_KEY_LEN flag is also set.
 */
static int
ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct rtattr *rta = (struct rtattr *)key;
	struct ssi_crypto_req ssi_req = {};
	struct crypto_authenc_key_param *param;
	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
	int seq_len = 0, rc = -EINVAL;

	SSI_LOG_DEBUG("Setting key in context @%p for %s. key=%p keylen=%u\n",
		      ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)),
		      key, keylen);

	/* STAT_PHASE_0: Init and sanity checks */

	if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
		/* Parse the rtattr header carrying enckeylen */
		if (!RTA_OK(rta, keylen))
			goto badkey;
		if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
			goto badkey;
		if (RTA_PAYLOAD(rta) < sizeof(*param))
			goto badkey;
		param = RTA_DATA(rta);
		ctx->enc_keylen = be32_to_cpu(param->enckeylen);
		/* Skip past the attribute: key now points at auth||enc key */
		key += RTA_ALIGN(rta->rta_len);
		keylen -= RTA_ALIGN(rta->rta_len);
		if (keylen < ctx->enc_keylen)
			goto badkey;
		ctx->auth_keylen = keylen - ctx->enc_keylen;

		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
			/* the nonce is stored in bytes at end of key */
			if (ctx->enc_keylen <
			    (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
				goto badkey;
			/* Copy nonce from last 4 bytes in CTR key to
			 *  first 4 bytes in CTR IV
			 */
			memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + ctx->enc_keylen -
				CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
			/* Set CTR key size */
			ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
		}
	} else { /* non-authenc - has just one key */
		ctx->enc_keylen = keylen;
		ctx->auth_keylen = 0;
	}

	rc = validate_keys_sizes(ctx);
	if (unlikely(rc != 0))
		goto badkey;

	/* STAT_PHASE_1: Copy key to ctx */

	/* Get key material */
	memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
	/* Zero-pad 192-bit AES keys to the buffer's full size */
	if (ctx->enc_keylen == 24)
		memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
		/* Raw key is staged; HW derives K1/K2/K3 below */
		memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
		rc = ssi_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
		if (rc != 0)
			goto badkey;
	}

	/* STAT_PHASE_2: Create sequence */

	switch (ctx->auth_mode) {
	case DRV_HASH_SHA1:
	case DRV_HASH_SHA256:
		seq_len = hmac_setkey(desc, ctx);
		break;
	case DRV_HASH_XCBC_MAC:
		seq_len = xcbc_setkey(desc, ctx);
		break;
	case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
		break; /* No auth. key setup */
	default:
		SSI_LOG_ERR("Unsupported authenc (%d)\n", ctx->auth_mode);
		rc = -ENOTSUPP;
		goto badkey;
	}

	/* STAT_PHASE_3: Submit sequence to HW */

	if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
		rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 0);
		if (unlikely(rc != 0)) {
			SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
			goto setkey_error;
		}
	}

	/* Update STAT_PHASE_3 */
	return rc;

badkey:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

setkey_error:
	return rc;
}
649
650 #if SSI_CC_HAS_AES_CCM
651 static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
652 {
653         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
654         int rc = 0;
655
656         if (keylen < 3)
657                 return -EINVAL;
658
659         keylen -= 3;
660         memcpy(ctx->ctr_nonce, key + keylen, 3);
661
662         rc = ssi_aead_setkey(tfm, key, keylen);
663
664         return rc;
665 }
666 #endif /*SSI_CC_HAS_AES_CCM*/
667
668 static int ssi_aead_setauthsize(
669         struct crypto_aead *authenc,
670         unsigned int authsize)
671 {
672         struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
673
674         /* Unsupported auth. sizes */
675         if ((authsize == 0) ||
676             (authsize > crypto_aead_maxauthsize(authenc))) {
677                 return -ENOTSUPP;
678         }
679
680         ctx->authsize = authsize;
681         SSI_LOG_DEBUG("authlen=%d\n", ctx->authsize);
682
683         return 0;
684 }
685
686 #if SSI_CC_HAS_AES_CCM
687 static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
688                                        unsigned int authsize)
689 {
690         switch (authsize) {
691         case 8:
692         case 12:
693         case 16:
694                 break;
695         default:
696                 return -EINVAL;
697         }
698
699         return ssi_aead_setauthsize(authenc, authsize);
700 }
701
702 static int ssi_ccm_setauthsize(struct crypto_aead *authenc,
703                                unsigned int authsize)
704 {
705         switch (authsize) {
706         case 4:
707         case 6:
708         case 8:
709         case 10:
710         case 12:
711         case 14:
712         case 16:
713                 break;
714         default:
715                 return -EINVAL;
716         }
717
718         return ssi_aead_setauthsize(authenc, authsize);
719 }
720 #endif /*SSI_CC_HAS_AES_CCM*/
721
722 static inline void
723 ssi_aead_create_assoc_desc(
724         struct aead_request *areq,
725         unsigned int flow_mode,
726         struct cc_hw_desc desc[],
727         unsigned int *seq_size)
728 {
729         struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
730         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
731         struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
732         enum ssi_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
733         unsigned int idx = *seq_size;
734
735         switch (assoc_dma_type) {
736         case SSI_DMA_BUF_DLLI:
737                 SSI_LOG_DEBUG("ASSOC buffer type DLLI\n");
738                 hw_desc_init(&desc[idx]);
739                 set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
740                              areq->assoclen, NS_BIT); set_flow_mode(&desc[idx],
741                              flow_mode);
742                 if ((ctx->auth_mode == DRV_HASH_XCBC_MAC) &&
743                     (areq_ctx->cryptlen > 0))
744                         set_din_not_last_indication(&desc[idx]);
745                 break;
746         case SSI_DMA_BUF_MLLI:
747                 SSI_LOG_DEBUG("ASSOC buffer type MLLI\n");
748                 hw_desc_init(&desc[idx]);
749                 set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
750                              areq_ctx->assoc.mlli_nents, NS_BIT);
751                 set_flow_mode(&desc[idx], flow_mode);
752                 if ((ctx->auth_mode == DRV_HASH_XCBC_MAC) &&
753                     (areq_ctx->cryptlen > 0))
754                         set_din_not_last_indication(&desc[idx]);
755                 break;
756         case SSI_DMA_BUF_NULL:
757         default:
758                 SSI_LOG_ERR("Invalid ASSOC buffer type\n");
759         }
760
761         *seq_size = (++idx);
762 }
763
/*
 * Queue a descriptor feeding the text to be authenticated into
 * @flow_mode: the freshly written ciphertext (dst) on encrypt, the
 * incoming ciphertext (src) on decrypt. Only called from the
 * double-pass flows in ssi_aead_hmac_authenc()/ssi_aead_xcbc_authenc().
 * Advances *seq_size by one.
 */
static inline void
ssi_aead_process_authenc_data_desc(
	struct aead_request *areq,
	unsigned int flow_mode,
	struct cc_hw_desc desc[],
	unsigned int *seq_size,
	int direct)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
	unsigned int idx = *seq_size;

	switch (data_dma_type) {
	case SSI_DMA_BUF_DLLI:
	{
		/* On encrypt the authenticated text lives in the destination
		 * SGL (it was just produced there); on decrypt, the source.
		 */
		struct scatterlist *cipher =
			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			areq_ctx->dst_sgl : areq_ctx->src_sgl;

		unsigned int offset =
			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			areq_ctx->dst_offset : areq_ctx->src_offset;
		SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type DLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     (sg_dma_address(cipher) + offset),
			     areq_ctx->cryptlen, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	}
	case SSI_DMA_BUF_MLLI:
	{
		/* DOUBLE-PASS flow (as default)
		 * assoc. + iv + data -compact in one table
		 * if assoclen is ZERO only IV perform
		 */
		ssi_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
		u32 mlli_nents = areq_ctx->assoc.mlli_nents;

		/* Single-pass uses a dedicated src/dst MLLI table instead
		 * of the combined assoc+iv+data one.
		 */
		if (likely(areq_ctx->is_single_pass)) {
			if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
				mlli_addr = areq_ctx->dst.sram_addr;
				mlli_nents = areq_ctx->dst.mlli_nents;
			} else {
				mlli_addr = areq_ctx->src.sram_addr;
				mlli_nents = areq_ctx->src.mlli_nents;
			}
		}

		SSI_LOG_DEBUG("AUTHENC: SRC/DST buffer type MLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
			     NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	}
	case SSI_DMA_BUF_NULL:
	default:
		SSI_LOG_ERR("AUTHENC: Invalid SRC/DST buffer type\n");
	}

	*seq_size = (++idx);
}
827
/*
 * Queue the descriptor that runs the payload through the cipher:
 * reads cryptlen bytes from the source (DLLI address or MLLI table)
 * and writes the result to the destination. Queues nothing when the
 * payload is empty. Advances *seq_size by one (unless empty).
 */
static inline void
ssi_aead_process_cipher_data_desc(
	struct aead_request *areq,
	unsigned int flow_mode,
	struct cc_hw_desc desc[],
	unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;

	if (areq_ctx->cryptlen == 0)
		return; /*null processing*/

	switch (data_dma_type) {
	case SSI_DMA_BUF_DLLI:
		SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type DLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     (sg_dma_address(areq_ctx->src_sgl) +
			      areq_ctx->src_offset), areq_ctx->cryptlen, NS_BIT);
		set_dout_dlli(&desc[idx],
			      (sg_dma_address(areq_ctx->dst_sgl) +
			       areq_ctx->dst_offset),
			      areq_ctx->cryptlen, NS_BIT, 0);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	case SSI_DMA_BUF_MLLI:
		SSI_LOG_DEBUG("CIPHER: SRC/DST buffer type MLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
			     areq_ctx->src.mlli_nents, NS_BIT);
		set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
			      areq_ctx->dst.mlli_nents, NS_BIT, 0);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	case SSI_DMA_BUF_NULL:
	default:
		SSI_LOG_ERR("CIPHER: Invalid SRC/DST buffer type\n");
	}

	*seq_size = (++idx);
}
871
/*
 * Queue the final descriptor that reads the computed MAC out of the
 * hash/AES engine. On encrypt the ICV is written straight to
 * icv_dma_addr; on decrypt it goes to mac_buf_dma_addr — presumably
 * compared against the received ICV later in the completion path
 * (TODO confirm against caller). This descriptor carries the queue
 * "last" indication, so it must be the final one in the sequence.
 * Advances *seq_size by one.
 */
static inline void ssi_aead_process_digest_result_desc(
	struct aead_request *req,
	struct cc_hw_desc desc[],
	unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int idx = *seq_size;
	/* any auth mode other than SHA1 maps onto the SHA256 HW engine */
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	int direct = req_ctx->gen_ctx.op_type;

	/* Get final ICV result */
	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		hw_desc_init(&desc[idx]);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
			      NS_BIT, 1);
		set_queue_last_ind(&desc[idx]);
		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
			/* XCBC-MAC runs on the AES engine, not the hash one */
			set_aes_not_hash_mode(&desc[idx]);
			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
		} else {
			set_cipher_config0(&desc[idx],
					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
			set_cipher_mode(&desc[idx], hash_mode);
		}
	} else { /*Decrypt*/
		/* Get ICV out from hardware */
		hw_desc_init(&desc[idx]);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
			      ctx->authsize, NS_BIT, 1);
		set_queue_last_ind(&desc[idx]);
		set_cipher_config0(&desc[idx],
				   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
			set_aes_not_hash_mode(&desc[idx]);
		} else {
			set_cipher_mode(&desc[idx], hash_mode);
		}
	}

	*seq_size = (++idx);
}
922
/*
 * Queue the two cipher setup descriptors: first load the IV/counter
 * state, then load the encryption key (AES or DES depending on the
 * transform's flow mode). Advances *seq_size by two.
 */
static inline void ssi_aead_setup_cipher_desc(
	struct aead_request *req,
	struct cc_hw_desc desc[],
	unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = req_ctx->hw_iv_size;
	unsigned int idx = *seq_size;
	int direct = req_ctx->gen_ctx.op_type;

	/* Setup cipher state */
	hw_desc_init(&desc[idx]);
	set_cipher_config0(&desc[idx], direct);
	set_flow_mode(&desc[idx], ctx->flow_mode);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
		     hw_iv_size, NS_BIT);
	/* CTR loads its counter into STATE1; block modes use STATE0 */
	if (ctx->cipher_mode == DRV_CIPHER_CTR)
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	else
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], ctx->cipher_mode);
	idx++;

	/* Setup enc. key */
	hw_desc_init(&desc[idx]);
	set_cipher_config0(&desc[idx], direct);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_flow_mode(&desc[idx], ctx->flow_mode);
	if (ctx->flow_mode == S_DIN_to_AES) {
		/* For 192-bit keys the DMA reads CC_AES_KEY_SIZE_MAX bytes
		 * while the key-size field keeps the true length — assumes
		 * the key buffer is padded to the max size (TODO confirm
		 * against setkey).
		 */
		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
			     ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
			      ctx->enc_keylen), NS_BIT);
		set_key_size_aes(&desc[idx], ctx->enc_keylen);
	} else {
		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
			     ctx->enc_keylen, NS_BIT);
		set_key_size_des(&desc[idx], ctx->enc_keylen);
	}
	set_cipher_mode(&desc[idx], ctx->cipher_mode);
	idx++;

	*seq_size = idx;
}
968
969 static inline void ssi_aead_process_cipher(
970         struct aead_request *req,
971         struct cc_hw_desc desc[],
972         unsigned int *seq_size,
973         unsigned int data_flow_mode)
974 {
975         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
976         int direct = req_ctx->gen_ctx.op_type;
977         unsigned int idx = *seq_size;
978
979         if (req_ctx->cryptlen == 0)
980                 return; /*null processing*/
981
982         ssi_aead_setup_cipher_desc(req, desc, &idx);
983         ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, &idx);
984         if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
985                 /* We must wait for DMA to write all cipher */
986                 hw_desc_init(&desc[idx]);
987                 set_din_no_dma(&desc[idx], 0, 0xfffff0);
988                 set_dout_no_dma(&desc[idx], 0, 0, 1);
989                 idx++;
990         }
991
992         *seq_size = idx;
993 }
994
/*
 * Initialize the hash engine for the HMAC inner pass: load the
 * precomputed ipad-XORed key digest as the hash state, then load the
 * initial digest length constant from SRAM. Advances *seq_size by two.
 */
static inline void ssi_aead_hmac_setup_digest_desc(
	struct aead_request *req,
	struct cc_hw_desc desc[],
	unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	/* non-SHA1 auth modes map onto the SHA256 HW engine */
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
	unsigned int idx = *seq_size;

	/* Loading hash ipad xor key state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_type(&desc[idx], DMA_DLLI,
		     ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
		     NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load init. digest len (64 bytes) */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_sram(&desc[idx],
		     ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata,
								hash_mode),
								HASH_LEN_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	*seq_size = idx;
}
1031
/*
 * Initialize the AES engine for XCBC-MAC: zero the MAC state, then
 * load the three derived subkeys K1 (LOAD_KEY0), K2 (LOAD_STATE1) and
 * K3 (LOAD_STATE2), read back-to-back from xcbc_keys_dma_addr.
 * Advances *seq_size by four.
 */
static inline void ssi_aead_xcbc_setup_digest_desc(
	struct aead_request *req,
	struct cc_hw_desc desc[],
	unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int idx = *seq_size;

	/* Loading MAC state (all-zero initial block) */
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* Setup XCBC MAC K1 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     ctx->auth_state.xcbc.xcbc_keys_dma_addr,
		     AES_KEYSIZE_128, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* Setup XCBC MAC K2 (second 16-byte chunk of the key buffer) */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
		      AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* Setup XCBC MAC K3 (third 16-byte chunk of the key buffer) */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
		      2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	*seq_size = idx;
}
1093
1094 static inline void ssi_aead_process_digest_header_desc(
1095         struct aead_request *req,
1096         struct cc_hw_desc desc[],
1097         unsigned int *seq_size)
1098 {
1099         unsigned int idx = *seq_size;
1100         /* Hash associated data */
1101         if (req->assoclen > 0)
1102                 ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
1103
1104         /* Hash IV */
1105         *seq_size = idx;
1106 }
1107
/*
 * HMAC finalization ("scheme"): pad and park the inner-hash length and
 * digest in the SRAM workspace, reload the hash engine with the
 * opad-XORed key state, then feed the inner digest back in so the
 * engine produces the outer hash. Advances *seq_size by five.
 */
static inline void ssi_aead_process_digest_scheme_desc(
	struct aead_request *req,
	struct cc_hw_desc desc[],
	unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct ssi_aead_handle *aead_handle = ctx->drvdata->aead_handle;
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
	unsigned int idx = *seq_size;

	/* Pad the inner hash and store its length in the SRAM workspace */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
		      HASH_LEN_SIZE);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	set_cipher_do(&desc[idx], DO_PAD);
	idx++;

	/* Get final ICV result */
	hw_desc_init(&desc[idx]);
	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
		      digest_size);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	set_cipher_mode(&desc[idx], hash_mode);
	idx++;

	/* Loading hash opad xor key state (stored right after the ipad one) */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
		     digest_size, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load init. digest len (64 bytes) */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_sram(&desc[idx],
		     ssi_ahash_get_initial_digest_len_sram_addr(ctx->drvdata,
								hash_mode),
		     HASH_LEN_SIZE);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Perform HASH update: run the inner digest through the outer hash */
	hw_desc_init(&desc[idx]);
	set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
		     digest_size);
	set_flow_mode(&desc[idx], DIN_HASH);
	idx++;

	*seq_size = idx;
}
1172
1173 static inline void ssi_aead_load_mlli_to_sram(
1174         struct aead_request *req,
1175         struct cc_hw_desc desc[],
1176         unsigned int *seq_size)
1177 {
1178         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1179         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1180         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1181
1182         if (unlikely(
1183                 (req_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
1184                 (req_ctx->data_buff_type == SSI_DMA_BUF_MLLI) ||
1185                 !req_ctx->is_single_pass)) {
1186                 SSI_LOG_DEBUG("Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
1187                               (unsigned int)ctx->drvdata->mlli_sram_addr,
1188                               req_ctx->mlli_params.mlli_len);
1189                 /* Copy MLLI table host-to-sram */
1190                 hw_desc_init(&desc[*seq_size]);
1191                 set_din_type(&desc[*seq_size], DMA_DLLI,
1192                              req_ctx->mlli_params.mlli_dma_addr,
1193                              req_ctx->mlli_params.mlli_len, NS_BIT);
1194                 set_dout_sram(&desc[*seq_size],
1195                               ctx->drvdata->mlli_sram_addr,
1196                               req_ctx->mlli_params.mlli_len);
1197                 set_flow_mode(&desc[*seq_size], BYPASS);
1198                 (*seq_size)++;
1199         }
1200 }
1201
1202 static inline enum cc_flow_mode ssi_aead_get_data_flow_mode(
1203         enum drv_crypto_direction direct,
1204         enum cc_flow_mode setup_flow_mode,
1205         bool is_single_pass)
1206 {
1207         enum cc_flow_mode data_flow_mode;
1208
1209         if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1210                 if (setup_flow_mode == S_DIN_to_AES)
1211                         data_flow_mode = likely(is_single_pass) ?
1212                                 AES_to_HASH_and_DOUT : DIN_AES_DOUT;
1213                 else
1214                         data_flow_mode = likely(is_single_pass) ?
1215                                 DES_to_HASH_and_DOUT : DIN_DES_DOUT;
1216         } else { /* Decrypt */
1217                 if (setup_flow_mode == S_DIN_to_AES)
1218                         data_flow_mode = likely(is_single_pass) ?
1219                                         AES_and_HASH : DIN_AES_DOUT;
1220                 else
1221                         data_flow_mode = likely(is_single_pass) ?
1222                                         DES_and_HASH : DIN_DES_DOUT;
1223         }
1224
1225         return data_flow_mode;
1226 }
1227
1228 static inline void ssi_aead_hmac_authenc(
1229         struct aead_request *req,
1230         struct cc_hw_desc desc[],
1231         unsigned int *seq_size)
1232 {
1233         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1234         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1235         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1236         int direct = req_ctx->gen_ctx.op_type;
1237         unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
1238                 direct, ctx->flow_mode, req_ctx->is_single_pass);
1239
1240         if (req_ctx->is_single_pass) {
1241                 /**
1242                  * Single-pass flow
1243                  */
1244                 ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
1245                 ssi_aead_setup_cipher_desc(req, desc, seq_size);
1246                 ssi_aead_process_digest_header_desc(req, desc, seq_size);
1247                 ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, seq_size);
1248                 ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
1249                 ssi_aead_process_digest_result_desc(req, desc, seq_size);
1250                 return;
1251         }
1252
1253         /**
1254          * Double-pass flow
1255          * Fallback for unsupported single-pass modes,
1256          * i.e. using assoc. data of non-word-multiple
1257          */
1258         if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1259                 /* encrypt first.. */
1260                 ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
1261                 /* authenc after..*/
1262                 ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
1263                 ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
1264                 ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
1265                 ssi_aead_process_digest_result_desc(req, desc, seq_size);
1266
1267         } else { /*DECRYPT*/
1268                 /* authenc first..*/
1269                 ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
1270                 ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
1271                 ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
1272                 /* decrypt after.. */
1273                 ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
1274                 /* read the digest result with setting the completion bit
1275                  * must be after the cipher operation
1276                  */
1277                 ssi_aead_process_digest_result_desc(req, desc, seq_size);
1278         }
1279 }
1280
1281 static inline void
1282 ssi_aead_xcbc_authenc(
1283         struct aead_request *req,
1284         struct cc_hw_desc desc[],
1285         unsigned int *seq_size)
1286 {
1287         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1288         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1289         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1290         int direct = req_ctx->gen_ctx.op_type;
1291         unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
1292                 direct, ctx->flow_mode, req_ctx->is_single_pass);
1293
1294         if (req_ctx->is_single_pass) {
1295                 /**
1296                  * Single-pass flow
1297                  */
1298                 ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
1299                 ssi_aead_setup_cipher_desc(req, desc, seq_size);
1300                 ssi_aead_process_digest_header_desc(req, desc, seq_size);
1301                 ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, seq_size);
1302                 ssi_aead_process_digest_result_desc(req, desc, seq_size);
1303                 return;
1304         }
1305
1306         /**
1307          * Double-pass flow
1308          * Fallback for unsupported single-pass modes,
1309          * i.e. using assoc. data of non-word-multiple
1310          */
1311         if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1312                 /* encrypt first.. */
1313                 ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
1314                 /* authenc after.. */
1315                 ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
1316                 ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
1317                 ssi_aead_process_digest_result_desc(req, desc, seq_size);
1318         } else { /*DECRYPT*/
1319                 /* authenc first.. */
1320                 ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
1321                 ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc, seq_size, direct);
1322                 /* decrypt after..*/
1323                 ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
1324                 /* read the digest result with setting the completion bit
1325                  * must be after the cipher operation
1326                  */
1327                 ssi_aead_process_digest_result_desc(req, desc, seq_size);
1328         }
1329 }
1330
1331 static int validate_data_size(struct ssi_aead_ctx *ctx,
1332                               enum drv_crypto_direction direct,
1333                               struct aead_request *req)
1334 {
1335         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1336         unsigned int assoclen = req->assoclen;
1337         unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1338                         (req->cryptlen - ctx->authsize) : req->cryptlen;
1339
1340         if (unlikely((direct == DRV_CRYPTO_DIRECTION_DECRYPT) &&
1341                      (req->cryptlen < ctx->authsize)))
1342                 goto data_size_err;
1343
1344         areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
1345
1346         switch (ctx->flow_mode) {
1347         case S_DIN_to_AES:
1348                 if (unlikely((ctx->cipher_mode == DRV_CIPHER_CBC) &&
1349                              !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE)))
1350                         goto data_size_err;
1351                 if (ctx->cipher_mode == DRV_CIPHER_CCM)
1352                         break;
1353                 if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1354                         if (areq_ctx->plaintext_authenticate_only)
1355                                 areq_ctx->is_single_pass = false;
1356                         break;
1357                 }
1358
1359                 if (!IS_ALIGNED(assoclen, sizeof(u32)))
1360                         areq_ctx->is_single_pass = false;
1361
1362                 if ((ctx->cipher_mode == DRV_CIPHER_CTR) &&
1363                     !IS_ALIGNED(cipherlen, sizeof(u32)))
1364                         areq_ctx->is_single_pass = false;
1365
1366                 break;
1367         case S_DIN_to_DES:
1368                 if (unlikely(!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE)))
1369                         goto data_size_err;
1370                 if (unlikely(!IS_ALIGNED(assoclen, DES_BLOCK_SIZE)))
1371                         areq_ctx->is_single_pass = false;
1372                 break;
1373         default:
1374                 SSI_LOG_ERR("Unexpected flow mode (%d)\n", ctx->flow_mode);
1375                 goto data_size_err;
1376         }
1377
1378         return 0;
1379
1380 data_size_err:
1381         return -EINVAL;
1382 }
1383
1384 #if SSI_CC_HAS_AES_CCM
/*
 * Encode the CCM associated-data length field "a" (RFC 3610 sect. 2.2)
 * into @pa0_buff: lengths below 0xFF00 use the 2-byte big-endian form,
 * larger ones the 6-byte form prefixed with 0xFF 0xFE.
 *
 * Return: number of bytes written (0 when there is no assoc. data).
 */
static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
{
	if (header_size == 0)
		return 0;

	if (header_size < ((1UL << 16) - (1UL << 8))) {
		/* short (2-byte) encoding */
		pa0_buff[0] = (header_size >> 8) & 0xFF;
		pa0_buff[1] = header_size & 0xFF;
		return 2;
	}

	/* long (6-byte) encoding with 0xFF 0xFE marker */
	pa0_buff[0] = 0xFF;
	pa0_buff[1] = 0xFE;
	pa0_buff[2] = (header_size >> 24) & 0xFF;
	pa0_buff[3] = (header_size >> 16) & 0xFF;
	pa0_buff[4] = (header_size >> 8) & 0xFF;
	pa0_buff[5] = header_size & 0xFF;
	return 6;
}
1410
/*
 * Write @msglen big-endian into the last bytes of the @csize-byte
 * length field at @block (the CCM B0 block's message-length field).
 * Appears to mirror set_msg_len() in the generic crypto/ccm.c.
 *
 * Return: 0 on success, -EOVERFLOW if @msglen does not fit in @csize
 * bytes.
 */
static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
{
	__be32 data;

	/* Zero the whole field first; the value goes at its tail */
	memset(block, 0, csize);
	block += csize;

	/* At most 4 value bytes are ever written (u32 msglen) */
	if (csize >= 4)
		csize = 4;
	else if (msglen > (1 << (8 * csize)))
		/* NOTE(review): boundary looks off-by-one (msglen equal to
		 * 1 << (8 * csize) also cannot be represented), but this
		 * matches the generic crypto/ccm.c implementation — confirm
		 * before changing.
		 */
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}
1428
/*
 * ssi_aead_ccm() - build the HW descriptor chain for AES-CCM.
 *
 * Sequence: load the AES-CTR key and counter state, load the same key
 * and the running MAC state into the CBC-MAC engine, hash the
 * associated data (or the pre-formatted B0+A0 control block when there
 * is none), push the payload through the combined cipher+MAC flow,
 * read back the intermediate CBC-MAC, and finally CTR-encrypt it with
 * counter block 0 to produce the tag at @mac_result.
 *
 * @req:      AEAD request being translated into descriptors
 * @desc:     descriptor array to append to
 * @seq_size: in/out index of the next free descriptor slot
 *
 * Always returns 0.
 */
static inline int ssi_aead_ccm(
	struct aead_request *req,
	struct cc_hw_desc desc[],
	unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int idx = *seq_size;
	unsigned int cipher_flow_mode;
	dma_addr_t mac_result;

	/* On decrypt the computed tag goes to mac_buf so it can later be
	 * compared against the received ICV; on encrypt it is written
	 * straight to the ICV location in the output.
	 */
	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
		cipher_flow_mode = AES_to_HASH_and_DOUT;
		mac_result = req_ctx->mac_buf_dma_addr;
	} else { /* Encrypt */
		cipher_flow_mode = AES_and_HASH;
		mac_result = req_ctx->icv_dma_addr;
	}

	/* load key */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
	/* 192-bit keys are loaded padded up to the HW max AES key size */
	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
		     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
		      ctx->enc_keylen), NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* load ctr state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_din_type(&desc[idx], DMA_DLLI,
		     req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* load MAC key (same AES key, fed to the CBC-MAC engine) */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
		     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
		      ctx->enc_keylen), NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* load MAC state (mac_buf was zeroed by config_ccm_adata()) */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* process assoc data */
	if (req->assoclen > 0) {
		ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
	} else {
		/* No AAD: hash only the pre-formatted B0 (+A0) block */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     sg_dma_address(&req_ctx->ccm_adata_sg),
			     AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;
	}

	/* process the cipher */
	if (req_ctx->cryptlen != 0)
		ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, &idx);

	/* Read temporal MAC */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
		      NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* load AES-CTR state (for last MAC calculation)*/
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Memory Barrier (same dummy-descriptor pattern used in the
	 * GCM flows below)
	 */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* encrypt the "T" value and store MAC in mac_state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
		     ctx->authsize, NS_BIT);
	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
	set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	idx++;

	*seq_size = idx;
	return 0;
}
1552
/*
 * config_ccm_adata() - pre-format the CCM control blocks per RFC 3610.
 *
 * Fills the per-request ccm_config scratch area with:
 *   - B0: flags byte, nonce and the encoded message length,
 *   - A0: the encoded associated-data length header,
 *   - CTR_COUNT_0: counter block 0, used to encrypt the CBC-MAC into
 *     the final tag,
 * and rewrites req->iv in place into counter block 1 (first payload
 * counter).
 *
 * Returns 0 on success, -EINVAL for an illegal length-field size in
 * req->iv[0], or -EOVERFLOW if the message length does not fit.
 */
static int config_ccm_adata(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	/* Note: The code assumes that req->iv[0] already contains the value
	 * of L' (= L - 1) of RFC 3610.
	 */
	unsigned int lp = req->iv[0];
	unsigned int l = lp + 1;  /* This is L of RFC 3610 (length-field size in bytes). */
	unsigned int m = ctx->authsize;  /* This is M of RFC 3610 (the tag size). */
	u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
	u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
	u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
	/* On decrypt req->cryptlen still includes the tag; strip it. */
	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - ctx->authsize);
	int rc;

	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
	memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);

	/* taken from crypto/ccm.c */
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (l < 2 || l > 8) {
		SSI_LOG_ERR("illegal iv value %X\n", req->iv[0]);
		return -EINVAL;
	}
	memcpy(b0, req->iv, AES_BLOCK_SIZE);

	/* format control info per RFC 3610 and
	 * NIST Special Publication 800-38C
	 */
	*b0 |= (8 * ((m - 2) / 2));
	if (req->assoclen > 0)
		*b0 |= 64;  /* Enable bit 6 if Adata exists. */

	rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write the l(m) length field. */
	if (rc != 0)
		return rc;
	 /* END of "taken from crypto/ccm.c" */

	/* l(a) - size of associated data. */
	req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);

	/* Zero the L-byte counter field of the IV and set the counter
	 * to 1, turning req->iv into counter block 1 for the payload.
	 */
	memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
	req->iv[15] = 1;

	/* Counter block 0 (counter value 0) encrypts the CBC-MAC. */
	memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
	ctr_count_0[15] = 0;

	return 0;
}
1606
1607 static void ssi_rfc4309_ccm_process(struct aead_request *req)
1608 {
1609         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1610         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1611         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1612
1613         /* L' */
1614         memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1615         areq_ctx->ctr_iv[0] = 3;  /* For RFC 4309, always use 4 bytes for message length (at most 2^32-1 bytes). */
1616
1617         /* In RFC 4309 there is an 11-bytes nonce+IV part, that we build here. */
1618         memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce, CCM_BLOCK_NONCE_SIZE);
1619         memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET,    req->iv,        CCM_BLOCK_IV_SIZE);
1620         req->iv = areq_ctx->ctr_iv;
1621         req->assoclen -= CCM_BLOCK_IV_SIZE;
1622 }
1623 #endif /*SSI_CC_HAS_AES_CCM*/
1624
1625 #if SSI_CC_HAS_AES_GCM
1626
/*
 * ssi_aead_gcm_setup_ghash_desc() - program the HASH engine for GHASH.
 *
 * Encrypts one all-zero block with the AES key to derive the hash
 * subkey H, loads H as the GHASH key, issues the dummy-descriptor
 * configuration the HW requires to select GHASH, and loads the
 * all-zero initial GHASH state.
 *
 * @req:      AEAD request being translated into descriptors
 * @desc:     descriptor array to append to
 * @seq_size: in/out index of the next free descriptor slot
 */
static inline void ssi_aead_gcm_setup_ghash_desc(
	struct aead_request *req,
	struct cc_hw_desc desc[],
	unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int idx = *seq_size;

	/* load key to AES*/
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
		     ctx->enc_keylen, NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* process one zero block to generate hkey */
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
	set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
		      NS_BIT, 0);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	idx++;

	/* Memory Barrier */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* Load GHASH subkey */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Configure Hash Engine to work with GHASH.
	 * Since it was not possible to extend HASH submodes to add GHASH,
	 * The following command is necessary in order to
	 * select GHASH (according to HW designers)
	 */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
	set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Load GHASH initial STATE (which is 0). (for any hash there is an initial state) */
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	*seq_size = idx;
}
1704
/*
 * ssi_aead_gcm_setup_gctr_desc() - program the AES engine for GCTR.
 *
 * Loads the AES key for the GCTR flow and, when there is payload to
 * process (i.e. not RFC 4543 authentication-only), loads the initial
 * counter block with counter value 2 (gcm_iv_inc2, prepared by
 * config_gcm_context()) for the payload pass.
 *
 * @req:      AEAD request being translated into descriptors
 * @desc:     descriptor array to append to
 * @seq_size: in/out index of the next free descriptor slot
 */
static inline void ssi_aead_gcm_setup_gctr_desc(
	struct aead_request *req,
	struct cc_hw_desc desc[],
	unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int idx = *seq_size;

	/* load key to AES*/
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
		     ctx->enc_keylen, NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	if ((req_ctx->cryptlen != 0) && (!req_ctx->plaintext_authenticate_only)) {
		/* load AES/CTR initial CTR value inc by 2*/
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
		set_key_size_aes(&desc[idx], ctx->enc_keylen);
		set_din_type(&desc[idx], DMA_DLLI,
			     req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
			     NS_BIT);
		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	}

	*seq_size = idx;
}
1742
/*
 * ssi_aead_process_gcm_result_desc() - finalize GHASH and emit the tag.
 *
 * Hashes the AAD/cipher bit-length block, stores the resulting GHASH
 * state into mac_buf, loads the counter block with counter value 1
 * (gcm_iv_inc1, prepared by config_gcm_context()) and GCTR-encrypts
 * the GHASH result into the authentication tag.  On decrypt the tag
 * goes to mac_buf (for comparison against the received ICV); on
 * encrypt it is written to the ICV location.
 *
 * @req:      AEAD request being translated into descriptors
 * @desc:     descriptor array to append to
 * @seq_size: in/out index of the next free descriptor slot
 */
static inline void ssi_aead_process_gcm_result_desc(
	struct aead_request *req,
	struct cc_hw_desc desc[],
	unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	dma_addr_t mac_result;
	unsigned int idx = *seq_size;

	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
		mac_result = req_ctx->mac_buf_dma_addr;
	} else { /* Encrypt */
		mac_result = req_ctx->icv_dma_addr;
	}

	/* process(ghash) gcm_block_len */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_flow_mode(&desc[idx], DIN_HASH);
	idx++;

	/* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
		      NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_aes_not_hash_mode(&desc[idx]);

	idx++;

	/* load AES/CTR initial CTR value inc by 1*/
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Memory Barrier */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* process GCTR on stored GHASH and store MAC in mac_state*/
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
	set_queue_last_ind(&desc[idx]);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	idx++;

	*seq_size = idx;
}
1808
1809 static inline int ssi_aead_gcm(
1810         struct aead_request *req,
1811         struct cc_hw_desc desc[],
1812         unsigned int *seq_size)
1813 {
1814         struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1815         unsigned int idx = *seq_size;
1816         unsigned int cipher_flow_mode;
1817
1818         if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1819                 cipher_flow_mode = AES_and_HASH;
1820         } else { /* Encrypt */
1821                 cipher_flow_mode = AES_to_HASH_and_DOUT;
1822         }
1823
1824         //in RFC4543 no data to encrypt. just copy data from src to dest.
1825         if (req_ctx->plaintext_authenticate_only) {
1826                 ssi_aead_process_cipher_data_desc(req, BYPASS, desc, seq_size);
1827                 ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
1828                 /* process(ghash) assoc data */
1829                 ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
1830                 ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
1831                 ssi_aead_process_gcm_result_desc(req, desc, seq_size);
1832                 idx = *seq_size;
1833                 return 0;
1834         }
1835
1836         // for gcm and rfc4106.
1837         ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
1838         /* process(ghash) assoc data */
1839         if (req->assoclen > 0)
1840                 ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
1841         ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
1842         /* process(gctr+ghash) */
1843         if (req_ctx->cryptlen != 0)
1844                 ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, seq_size);
1845         ssi_aead_process_gcm_result_desc(req, desc, seq_size);
1846
1847         idx = *seq_size;
1848         return 0;
1849 }
1850
#ifdef CC_DEBUG
/*
 * ssi_aead_dump_gcm() - debug helper: dump all GCM request material.
 *
 * No-op for non-GCTR (non-GCM) transforms.  Dumps the key, the
 * per-request IVs and counter blocks, the MAC buffers, the GHASH
 * length block and the raw src/dst data of the request.
 *
 * @title: optional banner printed before the dump (may be NULL)
 * @req:   the AEAD request to dump
 */
static inline void ssi_aead_dump_gcm(
	const char *title,
	struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);

	if (ctx->cipher_mode != DRV_CIPHER_GCTR)
		return;

	if (title) {
		SSI_LOG_DEBUG("----------------------------------------------------------------------------------");
		SSI_LOG_DEBUG("%s\n", title);
	}

	SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n",
		      ctx->cipher_mode, ctx->authsize, ctx->enc_keylen,
		      req->assoclen, req_ctx->cryptlen);

	if (ctx->enckey)
		dump_byte_array("mac key", ctx->enckey, 16);

	dump_byte_array("req->iv", req->iv, AES_BLOCK_SIZE);

	dump_byte_array("gcm_iv_inc1", req_ctx->gcm_iv_inc1, AES_BLOCK_SIZE);

	dump_byte_array("gcm_iv_inc2", req_ctx->gcm_iv_inc2, AES_BLOCK_SIZE);

	dump_byte_array("hkey", req_ctx->hkey, AES_BLOCK_SIZE);

	dump_byte_array("mac_buf", req_ctx->mac_buf, AES_BLOCK_SIZE);

	dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.len_a, AES_BLOCK_SIZE);

	if (req->src && req->cryptlen)
		dump_byte_array("req->src", sg_virt(req->src), req->cryptlen + req->assoclen);

	if (req->dst)
		dump_byte_array("req->dst", sg_virt(req->dst), req->cryptlen + ctx->authsize + req->assoclen);
}
#endif
1894
/*
 * config_gcm_context() - prepare the per-request GCM working buffers.
 *
 * Clears the HKEY and MAC buffers, builds the two counter blocks
 * derived from the IV (counter value 2 in gcm_iv_inc2 for the payload
 * pass, counter value 1 in gcm_iv_inc1 for the tag computation) and
 * fills gcm_len_block with the bit lengths of the AAD and ciphertext
 * that are GHASHed at the end of the flow.  For RFC 4543 everything
 * (AAD + IV + plaintext) counts as AAD and the cipher length is zero.
 *
 * Always returns 0.
 */
static int config_gcm_context(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);

	/* On decrypt req->cryptlen still includes the tag; strip it. */
	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - ctx->authsize);
	__be32 counter = cpu_to_be32(2);

	SSI_LOG_DEBUG("%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n", __func__, cryptlen, req->assoclen, ctx->authsize);

	memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);

	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);

	/* The counter occupies the last 4 bytes of the 16-byte block. */
	memcpy(req->iv + 12, &counter, 4);
	memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);

	counter = cpu_to_be32(1);
	memcpy(req->iv + 12, &counter, 4);
	memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);

	if (!req_ctx->plaintext_authenticate_only) {
		__be64 temp64;

		temp64 = cpu_to_be64(req->assoclen * 8);
		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
		temp64 = cpu_to_be64(cryptlen * 8);
		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
	} else { //rfc4543=>  all data(AAD,IV,Plain) are considered additional data that is nothing is encrypted.
		__be64 temp64;

		temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
		temp64 = 0;
		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
	}

	return 0;
}
1938
1939 static void ssi_rfc4_gcm_process(struct aead_request *req)
1940 {
1941         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1942         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
1943         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1944
1945         memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET, ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1946         memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET,    req->iv, GCM_BLOCK_RFC4_IV_SIZE);
1947         req->iv = areq_ctx->ctr_iv;
1948         req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
1949 }
1950
1951 #endif /*SSI_CC_HAS_AES_GCM*/
1952
/*
 * ssi_aead_process() - top-level AEAD request handler.
 *
 * Validates the request, prepares the per-mode IV/control material
 * (RFC 3686 CTR, CCM, GCM), DMA-maps the request buffers, builds the
 * HW descriptor sequence for the configured authentication mode and
 * submits it to the HW queue.
 *
 * @req:    the AEAD request
 * @direct: DRV_CRYPTO_DIRECTION_ENCRYPT or _DECRYPT
 *
 * Returns -EINPROGRESS when the request was queued (completion is
 * reported via ssi_aead_complete), or a negative error code on
 * failure.
 */
static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction direct)
{
	int rc = 0;
	int seq_len = 0;
	struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = &ctx->drvdata->plat_dev->dev;
	struct ssi_crypto_req ssi_req = {};

	SSI_LOG_DEBUG("%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
		      ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"),
		      ctx, req, req->iv, sg_virt(req->src), req->src->offset,
		      sg_virt(req->dst), req->dst->offset, req->cryptlen);

	/* STAT_PHASE_0: Init and sanity checks */

	/* Check data length according to mode */
	if (unlikely(validate_data_size(ctx, direct, req) != 0)) {
		SSI_LOG_ERR("Unsupported crypt/assoc len %d/%d.\n",
			    req->cryptlen, req->assoclen);
		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
		return -EINVAL;
	}

	/* Setup DX request structure */
	ssi_req.user_cb = (void *)ssi_aead_complete;
	ssi_req.user_arg = (void *)req;

	/* Setup request context */
	areq_ctx->gen_ctx.op_type = direct;
	areq_ctx->req_authsize = ctx->authsize;
	areq_ctx->cipher_mode = ctx->cipher_mode;

	/* STAT_PHASE_1: Map buffers */

	if (ctx->cipher_mode == DRV_CIPHER_CTR) {
		/* Build CTR IV - Copy nonce from last 4 bytes in
		 * CTR key to first 4 bytes in CTR IV
		 */
		memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, CTR_RFC3686_NONCE_SIZE);
		if (!areq_ctx->backup_giv) /*User none-generated IV*/
			memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
			       req->iv, CTR_RFC3686_IV_SIZE);
		/* Initialize counter portion of counter block */
		*(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);

		/* Replace with counter iv */
		req->iv = areq_ctx->ctr_iv;
		areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
	} else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
		   (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
		/* CCM/GCM work on a full AES block IV; copy the caller's
		 * IV into the request-context buffer unless it is
		 * already there (e.g. rfc4309/rfc4 process helpers).
		 */
		areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
		if (areq_ctx->ctr_iv != req->iv) {
			memcpy(areq_ctx->ctr_iv, req->iv, crypto_aead_ivsize(tfm));
			req->iv = areq_ctx->ctr_iv;
		}
	}  else {
		areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
	}

#if SSI_CC_HAS_AES_CCM
	if (ctx->cipher_mode == DRV_CIPHER_CCM) {
		rc = config_ccm_adata(req);
		if (unlikely(rc != 0)) {
			SSI_LOG_ERR("config_ccm_adata() returned with a failure %d!", rc);
			goto exit;
		}
	} else {
		areq_ctx->ccm_hdr_size = ccm_header_size_null;
	}
#else
	areq_ctx->ccm_hdr_size = ccm_header_size_null;
#endif /*SSI_CC_HAS_AES_CCM*/

#if SSI_CC_HAS_AES_GCM
	if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
		rc = config_gcm_context(req);
		if (unlikely(rc != 0)) {
			SSI_LOG_ERR("config_gcm_context() returned with a failure %d!", rc);
			goto exit;
		}
	}
#endif /*SSI_CC_HAS_AES_GCM*/

	rc = ssi_buffer_mgr_map_aead_request(ctx->drvdata, req);
	if (unlikely(rc != 0)) {
		SSI_LOG_ERR("map_request() failed\n");
		goto exit;
	}

	/* do we need to generate IV? */
	if (areq_ctx->backup_giv) {
		/* set the DMA mapped IV address*/
		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
			ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CTR_RFC3686_NONCE_SIZE;
			ssi_req.ivgen_dma_addr_len = 1;
		} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
			/* In ccm, the IV needs to exist both inside B0 and inside the counter.
			 * It is also copied to iv_dma_addr for other reasons (like returning
			 * it to the user).
			 * So, using 3 (identical) IV outputs.
			 */
			ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CCM_BLOCK_IV_OFFSET;
			ssi_req.ivgen_dma_addr[1] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_B0_OFFSET          + CCM_BLOCK_IV_OFFSET;
			ssi_req.ivgen_dma_addr[2] = sg_dma_address(&areq_ctx->ccm_adata_sg) + CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
			ssi_req.ivgen_dma_addr_len = 3;
		} else {
			ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr;
			ssi_req.ivgen_dma_addr_len = 1;
		}

		/* set the IV size (8/16 B long)*/
		ssi_req.ivgen_size = crypto_aead_ivsize(tfm);
	}

	/* STAT_PHASE_2: Create sequence */

	/* Load MLLI tables to SRAM if necessary */
	ssi_aead_load_mlli_to_sram(req, desc, &seq_len);

	/*TODO: move seq len by reference */
	switch (ctx->auth_mode) {
	case DRV_HASH_SHA1:
	case DRV_HASH_SHA256:
		ssi_aead_hmac_authenc(req, desc, &seq_len);
		break;
	case DRV_HASH_XCBC_MAC:
		ssi_aead_xcbc_authenc(req, desc, &seq_len);
		break;
#if (SSI_CC_HAS_AES_CCM || SSI_CC_HAS_AES_GCM)
	case DRV_HASH_NULL:
#if SSI_CC_HAS_AES_CCM
		if (ctx->cipher_mode == DRV_CIPHER_CCM)
			ssi_aead_ccm(req, desc, &seq_len);
#endif /*SSI_CC_HAS_AES_CCM*/
#if SSI_CC_HAS_AES_GCM
		if (ctx->cipher_mode == DRV_CIPHER_GCTR)
			ssi_aead_gcm(req, desc, &seq_len);
#endif /*SSI_CC_HAS_AES_GCM*/
			/* break for case DRV_HASH_NULL (odd indentation
			 * kept as-is)
			 */
			break;
#endif
	default:
		SSI_LOG_ERR("Unsupported authenc (%d)\n", ctx->auth_mode);
		ssi_buffer_mgr_unmap_aead_request(dev, req);
		rc = -ENOTSUPP;
		goto exit;
	}

	/* STAT_PHASE_3: Lock HW and push sequence */

	rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 1);

	/* send_request() reports success for async requests via
	 * -EINPROGRESS; anything else is a failure and the mapped
	 * buffers must be released here.
	 */
	if (unlikely(rc != -EINPROGRESS)) {
		SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
		ssi_buffer_mgr_unmap_aead_request(dev, req);
	}

exit:
	return rc;
}
2116
2117 static int ssi_aead_encrypt(struct aead_request *req)
2118 {
2119         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2120         int rc;
2121
2122         /* No generated IV required */
2123         areq_ctx->backup_iv = req->iv;
2124         areq_ctx->backup_giv = NULL;
2125         areq_ctx->is_gcm4543 = false;
2126
2127         areq_ctx->plaintext_authenticate_only = false;
2128
2129         rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2130         if (rc != -EINPROGRESS)
2131                 req->iv = areq_ctx->backup_iv;
2132
2133         return rc;
2134 }
2135
2136 #if SSI_CC_HAS_AES_CCM
2137 static int ssi_rfc4309_ccm_encrypt(struct aead_request *req)
2138 {
2139         /* Very similar to ssi_aead_encrypt() above. */
2140
2141         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2142         int rc = -EINVAL;
2143
2144         if (!valid_assoclen(req)) {
2145                 SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
2146                 goto out;
2147         }
2148
2149         /* No generated IV required */
2150         areq_ctx->backup_iv = req->iv;
2151         areq_ctx->backup_giv = NULL;
2152         areq_ctx->is_gcm4543 = true;
2153
2154         ssi_rfc4309_ccm_process(req);
2155
2156         rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2157         if (rc != -EINPROGRESS)
2158                 req->iv = areq_ctx->backup_iv;
2159 out:
2160         return rc;
2161 }
2162 #endif /* SSI_CC_HAS_AES_CCM */
2163
2164 static int ssi_aead_decrypt(struct aead_request *req)
2165 {
2166         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2167         int rc;
2168
2169         /* No generated IV required */
2170         areq_ctx->backup_iv = req->iv;
2171         areq_ctx->backup_giv = NULL;
2172         areq_ctx->is_gcm4543 = false;
2173
2174         areq_ctx->plaintext_authenticate_only = false;
2175
2176         rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2177         if (rc != -EINPROGRESS)
2178                 req->iv = areq_ctx->backup_iv;
2179
2180         return rc;
2181 }
2182
2183 #if SSI_CC_HAS_AES_CCM
2184 static int ssi_rfc4309_ccm_decrypt(struct aead_request *req)
2185 {
2186         /* Very similar to ssi_aead_decrypt() above. */
2187
2188         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2189         int rc = -EINVAL;
2190
2191         if (!valid_assoclen(req)) {
2192                 SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
2193                 goto out;
2194         }
2195
2196         /* No generated IV required */
2197         areq_ctx->backup_iv = req->iv;
2198         areq_ctx->backup_giv = NULL;
2199
2200         areq_ctx->is_gcm4543 = true;
2201         ssi_rfc4309_ccm_process(req);
2202
2203         rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2204         if (rc != -EINPROGRESS)
2205                 req->iv = areq_ctx->backup_iv;
2206
2207 out:
2208         return rc;
2209 }
2210 #endif /* SSI_CC_HAS_AES_CCM */
2211
2212 #if SSI_CC_HAS_AES_GCM
2213
2214 static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
2215 {
2216         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
2217         int rc = 0;
2218
2219         SSI_LOG_DEBUG("%s()  keylen %d, key %p\n", __func__, keylen, key);
2220
2221         if (keylen < 4)
2222                 return -EINVAL;
2223
2224         keylen -= 4;
2225         memcpy(ctx->ctr_nonce, key + keylen, 4);
2226
2227         rc = ssi_aead_setkey(tfm, key, keylen);
2228
2229         return rc;
2230 }
2231
2232 static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
2233 {
2234         struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
2235         int rc = 0;
2236
2237         SSI_LOG_DEBUG("%s()  keylen %d, key %p\n", __func__, keylen, key);
2238
2239         if (keylen < 4)
2240                 return -EINVAL;
2241
2242         keylen -= 4;
2243         memcpy(ctx->ctr_nonce, key + keylen, 4);
2244
2245         rc = ssi_aead_setkey(tfm, key, keylen);
2246
2247         return rc;
2248 }
2249
2250 static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
2251                                unsigned int authsize)
2252 {
2253         switch (authsize) {
2254         case 4:
2255         case 8:
2256         case 12:
2257         case 13:
2258         case 14:
2259         case 15:
2260         case 16:
2261                 break;
2262         default:
2263                 return -EINVAL;
2264         }
2265
2266         return ssi_aead_setauthsize(authenc, authsize);
2267 }
2268
2269 static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
2270                                        unsigned int authsize)
2271 {
2272         SSI_LOG_DEBUG("authsize %d\n", authsize);
2273
2274         switch (authsize) {
2275         case 8:
2276         case 12:
2277         case 16:
2278                 break;
2279         default:
2280                 return -EINVAL;
2281         }
2282
2283         return ssi_aead_setauthsize(authenc, authsize);
2284 }
2285
2286 static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
2287                                        unsigned int authsize)
2288 {
2289         SSI_LOG_DEBUG("authsize %d\n", authsize);
2290
2291         if (authsize != 16)
2292                 return -EINVAL;
2293
2294         return ssi_aead_setauthsize(authenc, authsize);
2295 }
2296
2297 static int ssi_rfc4106_gcm_encrypt(struct aead_request *req)
2298 {
2299         /* Very similar to ssi_aead_encrypt() above. */
2300
2301         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2302         int rc = -EINVAL;
2303
2304         if (!valid_assoclen(req)) {
2305                 SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
2306                 goto out;
2307         }
2308
2309         /* No generated IV required */
2310         areq_ctx->backup_iv = req->iv;
2311         areq_ctx->backup_giv = NULL;
2312
2313         areq_ctx->plaintext_authenticate_only = false;
2314
2315         ssi_rfc4_gcm_process(req);
2316         areq_ctx->is_gcm4543 = true;
2317
2318         rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2319         if (rc != -EINPROGRESS)
2320                 req->iv = areq_ctx->backup_iv;
2321 out:
2322         return rc;
2323 }
2324
2325 static int ssi_rfc4543_gcm_encrypt(struct aead_request *req)
2326 {
2327         /* Very similar to ssi_aead_encrypt() above. */
2328
2329         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2330         int rc;
2331
2332         //plaintext is not encryped with rfc4543
2333         areq_ctx->plaintext_authenticate_only = true;
2334
2335         /* No generated IV required */
2336         areq_ctx->backup_iv = req->iv;
2337         areq_ctx->backup_giv = NULL;
2338
2339         ssi_rfc4_gcm_process(req);
2340         areq_ctx->is_gcm4543 = true;
2341
2342         rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2343         if (rc != -EINPROGRESS)
2344                 req->iv = areq_ctx->backup_iv;
2345
2346         return rc;
2347 }
2348
2349 static int ssi_rfc4106_gcm_decrypt(struct aead_request *req)
2350 {
2351         /* Very similar to ssi_aead_decrypt() above. */
2352
2353         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2354         int rc = -EINVAL;
2355
2356         if (!valid_assoclen(req)) {
2357                 SSI_LOG_ERR("invalid Assoclen:%u\n", req->assoclen);
2358                 goto out;
2359         }
2360
2361         /* No generated IV required */
2362         areq_ctx->backup_iv = req->iv;
2363         areq_ctx->backup_giv = NULL;
2364
2365         areq_ctx->plaintext_authenticate_only = false;
2366
2367         ssi_rfc4_gcm_process(req);
2368         areq_ctx->is_gcm4543 = true;
2369
2370         rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2371         if (rc != -EINPROGRESS)
2372                 req->iv = areq_ctx->backup_iv;
2373 out:
2374         return rc;
2375 }
2376
2377 static int ssi_rfc4543_gcm_decrypt(struct aead_request *req)
2378 {
2379         /* Very similar to ssi_aead_decrypt() above. */
2380
2381         struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2382         int rc;
2383
2384         //plaintext is not decryped with rfc4543
2385         areq_ctx->plaintext_authenticate_only = true;
2386
2387         /* No generated IV required */
2388         areq_ctx->backup_iv = req->iv;
2389         areq_ctx->backup_giv = NULL;
2390
2391         ssi_rfc4_gcm_process(req);
2392         areq_ctx->is_gcm4543 = true;
2393
2394         rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2395         if (rc != -EINPROGRESS)
2396                 req->iv = areq_ctx->backup_iv;
2397
2398         return rc;
2399 }
2400 #endif /* SSI_CC_HAS_AES_GCM */
2401
/*
 * DX Block aead alg: templates for every AEAD algorithm this driver
 * registers with the Linux crypto API. Each entry maps a crypto API
 * name to the hardware cipher/flow/auth modes and the ops implemented
 * above; ssi_aead_alloc() turns each template into a registered alg.
 */
static struct ssi_alg_template aead_algs[] = {
	/* HMAC/XCBC authenc() over CBC block ciphers */
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-dx",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = ssi_aead_setkey,
			.setauthsize = ssi_aead_setauthsize,
			.encrypt = ssi_aead_encrypt,
			.decrypt = ssi_aead_decrypt,
			.init = ssi_aead_init,
			.exit = ssi_aead_exit,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_SHA1,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3-dx",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = ssi_aead_setkey,
			.setauthsize = ssi_aead_setauthsize,
			.encrypt = ssi_aead_encrypt,
			.decrypt = ssi_aead_decrypt,
			.init = ssi_aead_init,
			.exit = ssi_aead_exit,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_DES,
		.auth_mode = DRV_HASH_SHA1,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-dx",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = ssi_aead_setkey,
			.setauthsize = ssi_aead_setauthsize,
			.encrypt = ssi_aead_encrypt,
			.decrypt = ssi_aead_decrypt,
			.init = ssi_aead_init,
			.exit = ssi_aead_exit,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_SHA256,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3-dx",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = ssi_aead_setkey,
			.setauthsize = ssi_aead_setauthsize,
			.encrypt = ssi_aead_encrypt,
			.decrypt = ssi_aead_decrypt,
			.init = ssi_aead_init,
			.exit = ssi_aead_exit,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_DES,
		.auth_mode = DRV_HASH_SHA256,
	},
	{
		.name = "authenc(xcbc(aes),cbc(aes))",
		.driver_name = "authenc-xcbc-aes-cbc-aes-dx",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = ssi_aead_setkey,
			.setauthsize = ssi_aead_setauthsize,
			.encrypt = ssi_aead_encrypt,
			.decrypt = ssi_aead_decrypt,
			.init = ssi_aead_init,
			.exit = ssi_aead_exit,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_XCBC_MAC,
	},
	/* authenc() over RFC 3686 CTR-AES (stream mode, blocksize 1) */
	{
		.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
		.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-dx",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = ssi_aead_setkey,
			.setauthsize = ssi_aead_setauthsize,
			.encrypt = ssi_aead_encrypt,
			.decrypt = ssi_aead_decrypt,
			.init = ssi_aead_init,
			.exit = ssi_aead_exit,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_SHA1,
	},
	{
		.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
		.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-dx",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = ssi_aead_setkey,
			.setauthsize = ssi_aead_setauthsize,
			.encrypt = ssi_aead_encrypt,
			.decrypt = ssi_aead_decrypt,
			.init = ssi_aead_init,
			.exit = ssi_aead_exit,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_SHA256,
	},
	{
		.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
		.driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-dx",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = ssi_aead_setkey,
			.setauthsize = ssi_aead_setauthsize,
			.encrypt = ssi_aead_encrypt,
			.decrypt = ssi_aead_decrypt,
			.init = ssi_aead_init,
			.exit = ssi_aead_exit,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_XCBC_MAC,
	},
#if SSI_CC_HAS_AES_CCM
	/* AES-CCM, plain and RFC 4309 (IPsec ESP) variants */
	{
		.name = "ccm(aes)",
		.driver_name = "ccm-aes-dx",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = ssi_aead_setkey,
			.setauthsize = ssi_ccm_setauthsize,
			.encrypt = ssi_aead_encrypt,
			.decrypt = ssi_aead_decrypt,
			.init = ssi_aead_init,
			.exit = ssi_aead_exit,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CCM,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
	},
	{
		.name = "rfc4309(ccm(aes))",
		.driver_name = "rfc4309-ccm-aes-dx",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = ssi_rfc4309_ccm_setkey,
			.setauthsize = ssi_rfc4309_ccm_setauthsize,
			.encrypt = ssi_rfc4309_ccm_encrypt,
			.decrypt = ssi_rfc4309_ccm_decrypt,
			.init = ssi_aead_init,
			.exit = ssi_aead_exit,
			.ivsize = CCM_BLOCK_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CCM,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
	},
#endif /*SSI_CC_HAS_AES_CCM*/
#if SSI_CC_HAS_AES_GCM
	/* AES-GCM, plain, RFC 4106 (ESP) and RFC 4543 (GMAC) variants */
	{
		.name = "gcm(aes)",
		.driver_name = "gcm-aes-dx",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = ssi_aead_setkey,
			.setauthsize = ssi_gcm_setauthsize,
			.encrypt = ssi_aead_encrypt,
			.decrypt = ssi_aead_decrypt,
			.init = ssi_aead_init,
			.exit = ssi_aead_exit,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_GCTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
	},
	{
		.name = "rfc4106(gcm(aes))",
		.driver_name = "rfc4106-gcm-aes-dx",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = ssi_rfc4106_gcm_setkey,
			.setauthsize = ssi_rfc4106_gcm_setauthsize,
			.encrypt = ssi_rfc4106_gcm_encrypt,
			.decrypt = ssi_rfc4106_gcm_decrypt,
			.init = ssi_aead_init,
			.exit = ssi_aead_exit,
			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_GCTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
	},
	{
		.name = "rfc4543(gcm(aes))",
		.driver_name = "rfc4543-gcm-aes-dx",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = ssi_rfc4543_gcm_setkey,
			.setauthsize = ssi_rfc4543_gcm_setauthsize,
			.encrypt = ssi_rfc4543_gcm_encrypt,
			.decrypt = ssi_rfc4543_gcm_decrypt,
			.init = ssi_aead_init,
			.exit = ssi_aead_exit,
			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_GCTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
	},
#endif /*SSI_CC_HAS_AES_GCM*/
};
2656
2657 static struct ssi_crypto_alg *ssi_aead_create_alg(struct ssi_alg_template *template)
2658 {
2659         struct ssi_crypto_alg *t_alg;
2660         struct aead_alg *alg;
2661
2662         t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
2663         if (!t_alg) {
2664                 SSI_LOG_ERR("failed to allocate t_alg\n");
2665                 return ERR_PTR(-ENOMEM);
2666         }
2667         alg = &template->template_aead;
2668
2669         snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
2670         snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2671                  template->driver_name);
2672         alg->base.cra_module = THIS_MODULE;
2673         alg->base.cra_priority = SSI_CRA_PRIO;
2674
2675         alg->base.cra_ctxsize = sizeof(struct ssi_aead_ctx);
2676         alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2677                          template->type;
2678         alg->init = ssi_aead_init;
2679         alg->exit = ssi_aead_exit;
2680
2681         t_alg->aead_alg = *alg;
2682
2683         t_alg->cipher_mode = template->cipher_mode;
2684         t_alg->flow_mode = template->flow_mode;
2685         t_alg->auth_mode = template->auth_mode;
2686
2687         return t_alg;
2688 }
2689
2690 int ssi_aead_free(struct ssi_drvdata *drvdata)
2691 {
2692         struct ssi_crypto_alg *t_alg, *n;
2693         struct ssi_aead_handle *aead_handle =
2694                 (struct ssi_aead_handle *)drvdata->aead_handle;
2695
2696         if (aead_handle) {
2697                 /* Remove registered algs */
2698                 list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
2699                         crypto_unregister_aead(&t_alg->aead_alg);
2700                         list_del(&t_alg->entry);
2701                         kfree(t_alg);
2702                 }
2703                 kfree(aead_handle);
2704                 drvdata->aead_handle = NULL;
2705         }
2706
2707         return 0;
2708 }
2709
2710 int ssi_aead_alloc(struct ssi_drvdata *drvdata)
2711 {
2712         struct ssi_aead_handle *aead_handle;
2713         struct ssi_crypto_alg *t_alg;
2714         int rc = -ENOMEM;
2715         int alg;
2716
2717         aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
2718         if (!aead_handle) {
2719                 rc = -ENOMEM;
2720                 goto fail0;
2721         }
2722
2723         drvdata->aead_handle = aead_handle;
2724
2725         aead_handle->sram_workspace_addr = ssi_sram_mgr_alloc(
2726                 drvdata, MAX_HMAC_DIGEST_SIZE);
2727         if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
2728                 SSI_LOG_ERR("SRAM pool exhausted\n");
2729                 rc = -ENOMEM;
2730                 goto fail1;
2731         }
2732
2733         INIT_LIST_HEAD(&aead_handle->aead_list);
2734
2735         /* Linux crypto */
2736         for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
2737                 t_alg = ssi_aead_create_alg(&aead_algs[alg]);
2738                 if (IS_ERR(t_alg)) {
2739                         rc = PTR_ERR(t_alg);
2740                         SSI_LOG_ERR("%s alg allocation failed\n",
2741                                     aead_algs[alg].driver_name);
2742                         goto fail1;
2743                 }
2744                 t_alg->drvdata = drvdata;
2745                 rc = crypto_register_aead(&t_alg->aead_alg);
2746                 if (unlikely(rc != 0)) {
2747                         SSI_LOG_ERR("%s alg registration failed\n",
2748                                     t_alg->aead_alg.base.cra_driver_name);
2749                         goto fail2;
2750                 } else {
2751                         list_add_tail(&t_alg->entry, &aead_handle->aead_list);
2752                         SSI_LOG_DEBUG("Registered %s\n", t_alg->aead_alg.base.cra_driver_name);
2753                 }
2754         }
2755
2756         return 0;
2757
2758 fail2:
2759         kfree(t_alg);
2760 fail1:
2761         ssi_aead_free(drvdata);
2762 fail0:
2763         return rc;
2764 }