/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
                                       ICP_QAT_HW_CIPHER_NO_CONVERT, \
                                       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
                                       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
                                       ICP_QAT_HW_CIPHER_DECRYPT)

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

struct qat_alg_buf {
        uint32_t len;
        uint32_t resrvd;
        uint64_t addr;
} __packed;

struct qat_alg_buf_list {
        uint64_t resrvd;
        uint32_t num_bufs;
        uint32_t num_mapped_bufs;
        struct qat_alg_buf bufers[];
} __packed __aligned(64);
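
/*
 * qat_alg_buf{,_list} mirror the flat scatter-gather table format the
 * QAT firmware consumes, hence the fixed-width fields and __packed
 * layout; the 64-byte alignment presumably keeps the descriptor
 * cacheline-aligned for DMA.  The misspelled 'bufers' member is kept
 * as-is because that is the field name used throughout this driver.
 */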

/* Common content descriptor */
struct qat_alg_cd {
        union {
                struct qat_enc { /* Encrypt content desc */
                        struct icp_qat_hw_cipher_algo_blk cipher;
                        struct icp_qat_hw_auth_algo_blk hash;
                } qat_enc_cd;
                struct qat_dec { /* Decrypt content desc */
                        struct icp_qat_hw_auth_algo_blk hash;
                        struct icp_qat_hw_cipher_algo_blk cipher;
                } qat_dec_cd;
        };
} __aligned(64);

struct qat_alg_aead_ctx {
        struct qat_alg_cd *enc_cd;
        struct qat_alg_cd *dec_cd;
        dma_addr_t enc_cd_paddr;
        dma_addr_t dec_cd_paddr;
        struct icp_qat_fw_la_bulk_req enc_fw_req;
        struct icp_qat_fw_la_bulk_req dec_fw_req;
        struct crypto_shash *hash_tfm;
        enum icp_qat_hw_auth_algo qat_hash_alg;
        struct qat_crypto_instance *inst;
};

struct qat_alg_ablkcipher_ctx {
        struct icp_qat_hw_cipher_algo_blk *enc_cd;
        struct icp_qat_hw_cipher_algo_blk *dec_cd;
        dma_addr_t enc_cd_paddr;
        dma_addr_t dec_cd_paddr;
        struct icp_qat_fw_la_bulk_req enc_fw_req;
        struct icp_qat_fw_la_bulk_req dec_fw_req;
        struct qat_crypto_instance *inst;
        struct crypto_tfm *tfm;
        spinlock_t lock;        /* protects qat_alg_ablkcipher_ctx struct */
};

static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return ICP_QAT_HW_SHA1_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return ICP_QAT_HW_SHA256_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return ICP_QAT_HW_SHA512_STATE1_SZ;
        default:
                return -EFAULT;
        }
}

static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
                                  struct qat_alg_aead_ctx *ctx,
                                  const uint8_t *auth_key,
                                  unsigned int auth_keylen)
{
        SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
        struct sha1_state sha1;
        struct sha256_state sha256;
        struct sha512_state sha512;
        int block_size = crypto_shash_blocksize(ctx->hash_tfm);
        int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
        char ipad[block_size];
        char opad[block_size];
        __be32 *hash_state_out;
        __be64 *hash512_state_out;
        int i, offset;

        memset(ipad, 0, block_size);
        memset(opad, 0, block_size);
        shash->tfm = ctx->hash_tfm;
        shash->flags = 0x0;

        if (auth_keylen > block_size) {
                int ret = crypto_shash_digest(shash, auth_key,
                                              auth_keylen, ipad);
                if (ret)
                        return ret;

                memcpy(opad, ipad, digest_size);
        } else {
                memcpy(ipad, auth_key, auth_keylen);
                memcpy(opad, auth_key, auth_keylen);
        }

        for (i = 0; i < block_size; i++) {
                char *ipad_ptr = ipad + i;
                char *opad_ptr = opad + i;
                *ipad_ptr ^= HMAC_IPAD_VALUE;
                *opad_ptr ^= HMAC_OPAD_VALUE;
        }

        if (crypto_shash_init(shash))
                return -EFAULT;

        if (crypto_shash_update(shash, ipad, block_size))
                return -EFAULT;

        hash_state_out = (__be32 *)hash->sha.state1;
        hash512_state_out = (__be64 *)hash_state_out;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (crypto_shash_export(shash, &sha1))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha1.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (crypto_shash_export(shash, &sha256))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha256.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (crypto_shash_export(shash, &sha512))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
                        *hash512_state_out = cpu_to_be64(*(sha512.state + i));
                break;
        default:
                return -EFAULT;
        }

        if (crypto_shash_init(shash))
                return -EFAULT;

        if (crypto_shash_update(shash, opad, block_size))
                return -EFAULT;

        offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
        hash_state_out = (__be32 *)(hash->sha.state1 + offset);
        hash512_state_out = (__be64 *)hash_state_out;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (crypto_shash_export(shash, &sha1))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha1.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (crypto_shash_export(shash, &sha256))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha256.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (crypto_shash_export(shash, &sha512))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
                        *hash512_state_out = cpu_to_be64(*(sha512.state + i));
                break;
        default:
                return -EFAULT;
        }
        memzero_explicit(ipad, block_size);
        memzero_explicit(opad, block_size);
        return 0;
}
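
/*
 * The precompute above follows the RFC 2104 HMAC construction: one
 * compression round is run over (key ^ ipad) and one over (key ^ opad),
 * and the exported midstates are stored big-endian in state1 (inner
 * state at offset 0, outer state at the next 8-byte-aligned slot), so
 * the hardware can finish H(K ^ opad || H(K ^ ipad || msg)) without
 * ever seeing the raw authentication key.
 */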

static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
        header->hdr_flags =
                ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
        header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
        header->comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
                                            QAT_COMN_PTR_TYPE_SGL);
        ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
                                  ICP_QAT_FW_LA_PARTIAL_NONE);
        ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
        ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_NO_PROTO);
        ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
                                       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
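
/*
 * Every request template below starts from this common header: the
 * lookaside (LA) service, SGL source/destination pointers, a 64-bit
 * content descriptor address, the IV carried by value in a 16-byte
 * message field, and no partial/stateful processing.
 */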

static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
                                         int alg,
                                         struct crypto_authenc_keys *keys,
                                         int mode)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
        unsigned int digestsize = crypto_aead_authsize(aead_tfm);
        struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
        struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
        struct icp_qat_hw_auth_algo_blk *hash =
                (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
                sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

        /* CD setup */
        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                             ctx->qat_hash_alg, digestsize);
        hash->sha.inner_setup.auth_counter.counter =
                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
                return -EFAULT;

        /* Request setup */
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_RET_AUTH_RES);
        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
        cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

        /* Cipher CD config setup */
        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cipher_cd_ctrl->cipher_cfg_offset = 0;
        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                hash_cd_ctrl->inner_state1_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
                hash_cd_ctrl->inner_state2_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        default:
                break;
        }
        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
        return 0;
}
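
/*
 * Note the slice chaining above: CIPHER -> AUTH -> DRAM_WR, i.e. the
 * MAC is computed over the ciphertext, which matches the
 * encrypt-then-MAC construction authenc() implements.  The decrypt
 * session below reverses this (AUTH -> CIPHER) so the tag is verified
 * against the ciphertext before decryption.  All offsets and sizes in
 * the cd_ctrl words are in 8-byte units, hence the >> 3 shifts.
 */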

static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
                                         int alg,
                                         struct crypto_authenc_keys *keys,
                                         int mode)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
        unsigned int digestsize = crypto_aead_authsize(aead_tfm);
        struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
        struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
        struct icp_qat_hw_cipher_algo_blk *cipher =
                (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
                sizeof(struct icp_qat_hw_auth_setup) +
                roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
        struct icp_qat_fw_la_auth_req_params *auth_param =
                (struct icp_qat_fw_la_auth_req_params *)
                ((char *)&req_tmpl->serv_specif_rqpars +
                sizeof(struct icp_qat_fw_la_cipher_req_params));

        /* CD setup */
        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                             ctx->qat_hash_alg,
                                             digestsize);
        hash->sha.inner_setup.auth_counter.counter =
                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
                return -EFAULT;

        /* Request setup */
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_CMP_AUTH_RES);
        cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

        /* Cipher CD config setup */
        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cipher_cd_ctrl->cipher_cfg_offset =
                (sizeof(struct icp_qat_hw_auth_setup) +
                 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = 0;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                hash_cd_ctrl->inner_state1_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
                hash_cd_ctrl->inner_state2_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        default:
                break;
        }

        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
        auth_param->auth_res_sz = digestsize;
        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        return 0;
}

static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
                                        struct icp_qat_fw_la_bulk_req *req,
                                        struct icp_qat_hw_cipher_algo_blk *cd,
                                        const uint8_t *key, unsigned int keylen)
{
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

        memcpy(cd->aes.key, key, keylen);
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
        cd_pars->u.s.content_desc_params_sz =
                                sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
        /* Cipher CD config setup */
        cd_ctrl->cipher_key_sz = keylen >> 3;
        cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cd_ctrl->cipher_cfg_offset = 0;
        ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
                                        int alg, const uint8_t *key,
                                        unsigned int keylen, int mode)
{
        struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
        struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

        qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
        enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}

static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
                                        int alg, const uint8_t *key,
                                        unsigned int keylen, int mode)
{
        struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
        struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

        qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

        if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
                dec_cd->aes.cipher_config.val =
                                        QAT_AES_HW_CONFIG_DEC(alg, mode);
        else
                dec_cd->aes.cipher_config.val =
                                        QAT_AES_HW_CONFIG_ENC(alg, mode);
}
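
/*
 * CTR mode XORs the same AES-generated keystream in both directions, so
 * the decrypt descriptor deliberately reuses the encrypt configuration;
 * the hardware decrypt path (with its KEY_CONVERT step, presumably to
 * derive decryption round keys) is only needed for CBC and XTS here.
 */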

static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
        if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
                switch (key_len) {
                case AES_KEYSIZE_128:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                        break;
                case AES_KEYSIZE_192:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
                        break;
                case AES_KEYSIZE_256:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                        break;
                default:
                        return -EINVAL;
                }
        } else {
                switch (key_len) {
                case AES_KEYSIZE_128 << 1:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                        break;
                case AES_KEYSIZE_256 << 1:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                        break;
                default:
                        return -EINVAL;
                }
        }
        return 0;
}
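
/*
 * XTS keys are double length because the supplied key concatenates the
 * data-unit key and the tweak key: a 32-byte xts(aes) key selects
 * AES-128, a 64-byte key AES-256.  There is no AES-192 XTS case.
 */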

static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
                                      unsigned int keylen, int mode)
{
        struct crypto_authenc_keys keys;
        int alg;

        if (crypto_authenc_extractkeys(&keys, key, keylen))
                goto bad_key;

        if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
                goto bad_key;

        if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
                goto error;

        if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
                goto error;

        memzero_explicit(&keys, sizeof(keys));
        return 0;
bad_key:
        crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        memzero_explicit(&keys, sizeof(keys));
        return -EINVAL;
error:
        memzero_explicit(&keys, sizeof(keys));
        return -EFAULT;
}

static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
                                            const uint8_t *key,
                                            unsigned int keylen,
                                            int mode)
{
        int alg;

        if (qat_alg_validate_key(keylen, &alg, mode))
                goto bad_key;

        qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
        qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
        return 0;
bad_key:
        crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}

static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
                               unsigned int keylen)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev;

        if (ctx->enc_cd) {
                /* rekeying */
                dev = &GET_DEV(ctx->inst->accel_dev);
                memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
                memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
                memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
                memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
        } else {
                /* new key */
                int node = get_current_node();
                struct qat_crypto_instance *inst =
                                qat_crypto_get_instance_node(node);
                if (!inst)
                        return -EINVAL;

                dev = &GET_DEV(inst->accel_dev);
                ctx->inst = inst;
                ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
                                                  &ctx->enc_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->enc_cd)
                        return -ENOMEM;
                ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
                                                  &ctx->dec_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->dec_cd)
                        goto out_free_enc;
        }
        if (qat_alg_aead_init_sessions(tfm, key, keylen,
                                       ICP_QAT_HW_CIPHER_CBC_MODE))
                goto out_free_all;

        return 0;

out_free_all:
        memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                          ctx->dec_cd, ctx->dec_cd_paddr);
        ctx->dec_cd = NULL;
out_free_enc:
        memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                          ctx->enc_cd, ctx->enc_cd_paddr);
        ctx->enc_cd = NULL;
        return -ENOMEM;
}
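
/*
 * The first setkey binds the tfm to a crypto instance on the caller's
 * NUMA node and allocates both content descriptors from DMA-coherent
 * memory; subsequent setkeys zero and rebuild them in place.  The
 * descriptors are cleared before being freed so key material does not
 * linger in coherent memory.
 */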

static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
                              struct qat_crypto_request *qat_req)
{
        struct device *dev = &GET_DEV(inst->accel_dev);
        struct qat_alg_buf_list *bl = qat_req->buf.bl;
        struct qat_alg_buf_list *blout = qat_req->buf.blout;
        dma_addr_t blp = qat_req->buf.blp;
        dma_addr_t blpout = qat_req->buf.bloutp;
        size_t sz = qat_req->buf.sz;
        size_t sz_out = qat_req->buf.sz_out;
        int i;

        for (i = 0; i < bl->num_bufs; i++)
                dma_unmap_single(dev, bl->bufers[i].addr,
                                 bl->bufers[i].len, DMA_BIDIRECTIONAL);

        dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
        kfree(bl);
        if (blp != blpout) {
                /* For out-of-place operations, unmap only the buffers
                 * that were actually mapped for the destination list */
                int bufless = blout->num_bufs - blout->num_mapped_bufs;

                for (i = bufless; i < blout->num_bufs; i++) {
                        dma_unmap_single(dev, blout->bufers[i].addr,
                                         blout->bufers[i].len,
                                         DMA_BIDIRECTIONAL);
                }
                dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
                kfree(blout);
        }
}

static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                               struct scatterlist *sgl,
                               struct scatterlist *sglout,
                               struct qat_crypto_request *qat_req)
{
        struct device *dev = &GET_DEV(inst->accel_dev);
        int i, sg_nctr = 0;
        int n = sg_nents(sgl);
        struct qat_alg_buf_list *bufl;
        struct qat_alg_buf_list *buflout = NULL;
        dma_addr_t blp;
        dma_addr_t bloutp = 0;
        struct scatterlist *sg;
        size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
                        ((1 + n) * sizeof(struct qat_alg_buf));

        if (unlikely(!n))
                return -EINVAL;

        bufl = kzalloc_node(sz, GFP_ATOMIC,
                            dev_to_node(&GET_DEV(inst->accel_dev)));
        if (unlikely(!bufl))
                return -ENOMEM;

        blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, blp)))
                goto err_in;

        for_each_sg(sgl, sg, n, i) {
                int y = sg_nctr;

                if (!sg->length)
                        continue;

                bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
                                                      sg->length,
                                                      DMA_BIDIRECTIONAL);
                bufl->bufers[y].len = sg->length;
                if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
                        goto err_in;
                sg_nctr++;
        }
        bufl->num_bufs = sg_nctr;
        qat_req->buf.bl = bufl;
        qat_req->buf.blp = blp;
        qat_req->buf.sz = sz;
        /* Handle out of place operation */
        if (sgl != sglout) {
                struct qat_alg_buf *bufers;

                n = sg_nents(sglout);
                sz_out = sizeof(struct qat_alg_buf_list) +
                        ((1 + n) * sizeof(struct qat_alg_buf));
                sg_nctr = 0;
                buflout = kzalloc_node(sz_out, GFP_ATOMIC,
                                       dev_to_node(&GET_DEV(inst->accel_dev)));
                if (unlikely(!buflout))
                        goto err_in;
                bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, bloutp)))
                        goto err_out;
                bufers = buflout->bufers;
                for_each_sg(sglout, sg, n, i) {
                        int y = sg_nctr;

                        if (!sg->length)
                                continue;

                        bufers[y].addr = dma_map_single(dev, sg_virt(sg),
                                                        sg->length,
                                                        DMA_BIDIRECTIONAL);
                        if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
                                goto err_out;
                        bufers[y].len = sg->length;
                        sg_nctr++;
                }
                buflout->num_bufs = sg_nctr;
                buflout->num_mapped_bufs = sg_nctr;
                qat_req->buf.blout = buflout;
                qat_req->buf.bloutp = bloutp;
                qat_req->buf.sz_out = sz_out;
        } else {
                /* Otherwise set the src and dst to the same address */
                qat_req->buf.bloutp = qat_req->buf.blp;
                qat_req->buf.sz_out = 0;
        }
        return 0;

err_out:
        n = sg_nents(sglout);
        for (i = 0; i < n; i++)
                if (!dma_mapping_error(dev, buflout->bufers[i].addr))
                        dma_unmap_single(dev, buflout->bufers[i].addr,
                                         buflout->bufers[i].len,
                                         DMA_BIDIRECTIONAL);
        if (!dma_mapping_error(dev, bloutp))
                dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
        kfree(buflout);

err_in:
        n = sg_nents(sgl);
        for (i = 0; i < n; i++)
                if (!dma_mapping_error(dev, bufl->bufers[i].addr))
                        dma_unmap_single(dev, bufl->bufers[i].addr,
                                         bufl->bufers[i].len,
                                         DMA_BIDIRECTIONAL);

        if (!dma_mapping_error(dev, blp))
                dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
        kfree(bufl);

        dev_err(dev, "Failed to map buf for dma\n");
        return -ENOMEM;
}
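
/*
 * The function above builds the firmware-visible SGL from a kernel
 * scatterlist: the table itself is mapped DMA_TO_DEVICE and each data
 * segment DMA_BIDIRECTIONAL (src and dst may alias for in-place
 * operation), with zero-length entries skipped.  Out-of-place requests
 * get a second table for the destination, and the error paths unwind
 * only mappings that succeeded, hence the dma_mapping_error() checks.
 */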

static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
                                  struct qat_crypto_request *qat_req)
{
        struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
        struct qat_crypto_instance *inst = ctx->inst;
        struct aead_request *areq = qat_req->aead_req;
        uint8_t stat_field = qat_resp->comn_resp.comn_status;
        int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

        qat_alg_free_bufl(inst, qat_req);
        if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
                res = -EBADMSG;
        areq->base.complete(&areq->base, res);
}

static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
                                        struct qat_crypto_request *qat_req)
{
        struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
        struct qat_crypto_instance *inst = ctx->inst;
        struct ablkcipher_request *areq = qat_req->ablkcipher_req;
        uint8_t stat_field = qat_resp->comn_resp.comn_status;
        int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

        qat_alg_free_bufl(inst, qat_req);
        if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
                res = -EINVAL;
        areq->base.complete(&areq->base, res);
}

void qat_alg_callback(void *resp)
{
        struct icp_qat_fw_la_resp *qat_resp = resp;
        struct qat_crypto_request *qat_req =
                                (void *)(__force long)qat_resp->opaque_data;

        qat_req->cb(qat_resp, qat_req);
}
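
/*
 * Response path: the request pointer rides through the firmware ring in
 * opaque_data and is recovered in qat_alg_callback() above.  An AEAD
 * failure is reported as -EBADMSG (authentication mismatch); a plain
 * cipher failure as -EINVAL.
 */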

static int qat_alg_aead_dec(struct aead_request *areq)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int digest_size = crypto_aead_authsize(aead_tfm);
        int ret, ctr = 0;
        u32 cipher_len;

        cipher_len = areq->cryptlen - digest_size;
        if (cipher_len % AES_BLOCK_SIZE != 0)
                return -EINVAL;

        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->dec_fw_req;
        qat_req->aead_ctx = ctx;
        qat_req->aead_req = areq;
        qat_req->cb = qat_aead_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = cipher_len;
        cipher_param->cipher_offset = areq->assoclen;
        memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
        auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
        auth_param->auth_off = 0;
        auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
        uint8_t *iv = areq->iv;
        int ret, ctr = 0;

        if (areq->cryptlen % AES_BLOCK_SIZE != 0)
                return -EINVAL;

        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->enc_fw_req;
        qat_req->aead_ctx = ctx;
        qat_req->aead_req = areq;
        qat_req->cb = qat_aead_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

        memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
        cipher_param->cipher_length = areq->cryptlen;
        cipher_param->cipher_offset = areq->assoclen;

        auth_param->auth_off = 0;
        auth_param->auth_len = areq->assoclen + areq->cryptlen;

        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}
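
/*
 * Submission uses a short busy-retry: adf_send_message() is retried up
 * to ten times while the ring is full (-EAGAIN) before the request is
 * unmapped and -EBUSY returned to the caller.  A queued request
 * completes asynchronously, hence -EINPROGRESS on success.
 */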

static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
                                     const u8 *key, unsigned int keylen,
                                     int mode)
{
        struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        struct device *dev;

        spin_lock(&ctx->lock);
        if (ctx->enc_cd) {
                /* rekeying */
                dev = &GET_DEV(ctx->inst->accel_dev);
                memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
                memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
                memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
                memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
        } else {
                /* new key */
                int node = get_current_node();
                struct qat_crypto_instance *inst =
                                qat_crypto_get_instance_node(node);
                if (!inst) {
                        spin_unlock(&ctx->lock);
                        return -EINVAL;
                }

                dev = &GET_DEV(inst->accel_dev);
                ctx->inst = inst;
                ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
                                                  &ctx->enc_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->enc_cd) {
                        spin_unlock(&ctx->lock);
                        return -ENOMEM;
                }
                ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
                                                  &ctx->dec_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->dec_cd) {
                        spin_unlock(&ctx->lock);
                        goto out_free_enc;
                }
        }
        spin_unlock(&ctx->lock);
        if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
                goto out_free_all;

        return 0;

out_free_all:
        memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
        dma_free_coherent(dev, sizeof(*ctx->dec_cd),
                          ctx->dec_cd, ctx->dec_cd_paddr);
        ctx->dec_cd = NULL;
out_free_enc:
        memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
        dma_free_coherent(dev, sizeof(*ctx->enc_cd),
                          ctx->enc_cd, ctx->enc_cd_paddr);
        ctx->enc_cd = NULL;
        return -ENOMEM;
}

static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
                                         const u8 *key, unsigned int keylen)
{
        return qat_alg_ablkcipher_setkey(tfm, key, keylen,
                                         ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
                                         const u8 *key, unsigned int keylen)
{
        return qat_alg_ablkcipher_setkey(tfm, key, keylen,
                                         ICP_QAT_HW_CIPHER_CTR_MODE);
}

static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
                                         const u8 *key, unsigned int keylen)
{
        return qat_alg_ablkcipher_setkey(tfm, key, keylen,
                                         ICP_QAT_HW_CIPHER_XTS_MODE);
}

static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int ret, ctr = 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->enc_fw_req;
        qat_req->ablkcipher_ctx = ctx;
        qat_req->ablkcipher_req = req;
        qat_req->cb = qat_ablkcipher_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = req->nbytes;
        cipher_param->cipher_offset = 0;
        memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int ret, ctr = 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->dec_fw_req;
        qat_req->ablkcipher_ctx = ctx;
        qat_req->ablkcipher_req = req;
        qat_req->cb = qat_ablkcipher_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = req->nbytes;
        cipher_param->cipher_offset = 0;
        memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_aead_init(struct crypto_aead *tfm,
                             enum icp_qat_hw_auth_algo hash,
                             const char *hash_name)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

        ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
        if (IS_ERR(ctx->hash_tfm))
                return PTR_ERR(ctx->hash_tfm);
        ctx->qat_hash_alg = hash;
        crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
        return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev;

        crypto_free_shash(ctx->hash_tfm);

        if (!inst)
                return;

        dev = &GET_DEV(inst->accel_dev);
        if (ctx->enc_cd) {
                memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                                  ctx->enc_cd, ctx->enc_cd_paddr);
        }
        if (ctx->dec_cd) {
                memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                                  ctx->dec_cd, ctx->dec_cd_paddr);
        }
        qat_crypto_put_instance(inst);
}

static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        spin_lock_init(&ctx->lock);
        tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
        ctx->tfm = tfm;
        return 0;
}

static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
{
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev;

        if (!inst)
                return;

        dev = &GET_DEV(inst->accel_dev);
        if (ctx->enc_cd) {
                memset(ctx->enc_cd, 0,
                       sizeof(struct icp_qat_hw_cipher_algo_blk));
                dma_free_coherent(dev,
                                  sizeof(struct icp_qat_hw_cipher_algo_blk),
                                  ctx->enc_cd, ctx->enc_cd_paddr);
        }
        if (ctx->dec_cd) {
                memset(ctx->dec_cd, 0,
                       sizeof(struct icp_qat_hw_cipher_algo_blk));
                dma_free_coherent(dev,
                                  sizeof(struct icp_qat_hw_cipher_algo_blk),
                                  ctx->dec_cd, ctx->dec_cd_paddr);
        }
        qat_crypto_put_instance(inst);
}

static struct aead_alg qat_aeads[] = { {
        .base = {
                .cra_name = "authenc(hmac(sha1),cbc(aes))",
                .cra_driver_name = "qat_aes_cbc_hmac_sha1",
                .cra_priority = 4001,
                .cra_flags = CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
                .cra_module = THIS_MODULE,
        },
        .init = qat_alg_aead_sha1_init,
        .exit = qat_alg_aead_exit,
        .setkey = qat_alg_aead_setkey,
        .decrypt = qat_alg_aead_dec,
        .encrypt = qat_alg_aead_enc,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA1_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha256),cbc(aes))",
                .cra_driver_name = "qat_aes_cbc_hmac_sha256",
                .cra_priority = 4001,
                .cra_flags = CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
                .cra_module = THIS_MODULE,
        },
        .init = qat_alg_aead_sha256_init,
        .exit = qat_alg_aead_exit,
        .setkey = qat_alg_aead_setkey,
        .decrypt = qat_alg_aead_dec,
        .encrypt = qat_alg_aead_enc,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA256_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha512),cbc(aes))",
                .cra_driver_name = "qat_aes_cbc_hmac_sha512",
                .cra_priority = 4001,
                .cra_flags = CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
                .cra_module = THIS_MODULE,
        },
        .init = qat_alg_aead_sha512_init,
        .exit = qat_alg_aead_exit,
        .setkey = qat_alg_aead_setkey,
        .decrypt = qat_alg_aead_dec,
        .encrypt = qat_alg_aead_enc,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA512_DIGEST_SIZE,
} };
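
/*
 * For reference, these AEADs are reached by name through the generic
 * kernel crypto API; cra_priority 4001 makes them preferred over the
 * software authenc() implementations once registered.  A minimal,
 * hypothetical caller sketch (error handling mostly elided):
 *
 *      struct crypto_aead *tfm =
 *              crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 *      crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *      crypto_aead_setkey(tfm, key, keylen);
 *      ...
 *      crypto_free_aead(tfm);
 *
 * where key is an authenc()-encoded blob of the authentication and
 * encryption keys (see crypto_authenc_extractkeys() above).
 */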

static struct crypto_alg qat_algs[] = { {
        .cra_name = "cbc(aes)",
        .cra_driver_name = "qat_aes_cbc",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_ablkcipher_init,
        .cra_exit = qat_alg_ablkcipher_exit,
        .cra_u = {
                .ablkcipher = {
                        .setkey = qat_alg_ablkcipher_cbc_setkey,
                        .decrypt = qat_alg_ablkcipher_decrypt,
                        .encrypt = qat_alg_ablkcipher_encrypt,
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
}, {
        .cra_name = "ctr(aes)",
        .cra_driver_name = "qat_aes_ctr",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_ablkcipher_init,
        .cra_exit = qat_alg_ablkcipher_exit,
        .cra_u = {
                .ablkcipher = {
                        .setkey = qat_alg_ablkcipher_ctr_setkey,
                        .decrypt = qat_alg_ablkcipher_decrypt,
                        .encrypt = qat_alg_ablkcipher_encrypt,
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
}, {
        .cra_name = "xts(aes)",
        .cra_driver_name = "qat_aes_xts",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_ablkcipher_init,
        .cra_exit = qat_alg_ablkcipher_exit,
        .cra_u = {
                .ablkcipher = {
                        .setkey = qat_alg_ablkcipher_xts_setkey,
                        .decrypt = qat_alg_ablkcipher_decrypt,
                        .encrypt = qat_alg_ablkcipher_encrypt,
                        .min_keysize = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize = 2 * AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
} };

int qat_algs_register(void)
{
        int ret = 0, i;

        mutex_lock(&algs_lock);
        if (++active_devs != 1)
                goto unlock;

        for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
                qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                        CRYPTO_ALG_ASYNC;

        ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
        if (ret)
                goto unlock;

        for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
                qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;

        ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
        if (ret)
                goto unreg_algs;

unlock:
        mutex_unlock(&algs_lock);
        return ret;

unreg_algs:
        crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
        goto unlock;
}
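
/*
 * Registration is refcounted via active_devs under algs_lock: only the
 * first accelerator to come up registers the algorithms, and (below)
 * only the last one to go away unregisters them.
 */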

void qat_algs_unregister(void)
{
        mutex_lock(&algs_lock);
        if (--active_devs != 0)
                goto unlock;

        crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
        crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));

unlock:
        mutex_unlock(&algs_lock);
}