/* GNU Linux-libre 4.14.290-gnu1: drivers/crypto/qat/qat_common/qat_algs.c */
/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
                                       ICP_QAT_HW_CIPHER_NO_CONVERT, \
                                       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
                                       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
                                       ICP_QAT_HW_CIPHER_DECRYPT)

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

struct qat_alg_buf {
        uint32_t len;
        uint32_t resrvd;
        uint64_t addr;
} __packed;

struct qat_alg_buf_list {
        uint64_t resrvd;
        uint32_t num_bufs;
        uint32_t num_mapped_bufs;
        struct qat_alg_buf bufers[];
} __packed __aligned(64);

/* Common content descriptor */
struct qat_alg_cd {
        union {
                struct qat_enc { /* Encrypt content desc */
                        struct icp_qat_hw_cipher_algo_blk cipher;
                        struct icp_qat_hw_auth_algo_blk hash;
                } qat_enc_cd;
                struct qat_dec { /* Decrypt content desc */
                        struct icp_qat_hw_auth_algo_blk hash;
                        struct icp_qat_hw_cipher_algo_blk cipher;
                } qat_dec_cd;
        };
} __aligned(64);

struct qat_alg_aead_ctx {
        struct qat_alg_cd *enc_cd;
        struct qat_alg_cd *dec_cd;
        dma_addr_t enc_cd_paddr;
        dma_addr_t dec_cd_paddr;
        struct icp_qat_fw_la_bulk_req enc_fw_req;
        struct icp_qat_fw_la_bulk_req dec_fw_req;
        struct crypto_shash *hash_tfm;
        enum icp_qat_hw_auth_algo qat_hash_alg;
        struct qat_crypto_instance *inst;
};

struct qat_alg_ablkcipher_ctx {
        struct icp_qat_hw_cipher_algo_blk *enc_cd;
        struct icp_qat_hw_cipher_algo_blk *dec_cd;
        dma_addr_t enc_cd_paddr;
        dma_addr_t dec_cd_paddr;
        struct icp_qat_fw_la_bulk_req enc_fw_req;
        struct icp_qat_fw_la_bulk_req dec_fw_req;
        struct qat_crypto_instance *inst;
        struct crypto_tfm *tfm;
        spinlock_t lock;        /* protects qat_alg_ablkcipher_ctx struct */
};

static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return ICP_QAT_HW_SHA1_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return ICP_QAT_HW_SHA256_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return ICP_QAT_HW_SHA512_STATE1_SZ;
        default:
                return -EFAULT;
        }
}

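/*
 * Precompute the HMAC inner and outer partial hashes for @auth_key and
 * store them in the hardware auth block's state1. Per RFC 2104, a key
 * longer than the block size is digested first; both pads are then
 * XORed with the HMAC constants and run through a single compression
 * each, captured via crypto_shash_export().
 */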
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
                                  struct qat_alg_aead_ctx *ctx,
                                  const uint8_t *auth_key,
                                  unsigned int auth_keylen)
{
        SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
        struct sha1_state sha1;
        struct sha256_state sha256;
        struct sha512_state sha512;
        int block_size = crypto_shash_blocksize(ctx->hash_tfm);
        int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
        char ipad[block_size];
        char opad[block_size];
        __be32 *hash_state_out;
        __be64 *hash512_state_out;
        int i, offset;

        memset(ipad, 0, block_size);
        memset(opad, 0, block_size);
        shash->tfm = ctx->hash_tfm;
        shash->flags = 0x0;

        if (auth_keylen > block_size) {
                int ret = crypto_shash_digest(shash, auth_key,
                                              auth_keylen, ipad);
                if (ret)
                        return ret;

                memcpy(opad, ipad, digest_size);
        } else {
                memcpy(ipad, auth_key, auth_keylen);
                memcpy(opad, auth_key, auth_keylen);
        }

        for (i = 0; i < block_size; i++) {
                char *ipad_ptr = ipad + i;
                char *opad_ptr = opad + i;
                *ipad_ptr ^= HMAC_IPAD_VALUE;
                *opad_ptr ^= HMAC_OPAD_VALUE;
        }

        if (crypto_shash_init(shash))
                return -EFAULT;

        if (crypto_shash_update(shash, ipad, block_size))
                return -EFAULT;

        hash_state_out = (__be32 *)hash->sha.state1;
        hash512_state_out = (__be64 *)hash_state_out;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (crypto_shash_export(shash, &sha1))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha1.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (crypto_shash_export(shash, &sha256))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha256.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (crypto_shash_export(shash, &sha512))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
                        *hash512_state_out = cpu_to_be64(*(sha512.state + i));
                break;
        default:
                return -EFAULT;
        }

        if (crypto_shash_init(shash))
                return -EFAULT;

        if (crypto_shash_update(shash, opad, block_size))
                return -EFAULT;

        offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
        hash_state_out = (__be32 *)(hash->sha.state1 + offset);
        hash512_state_out = (__be64 *)hash_state_out;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (crypto_shash_export(shash, &sha1))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha1.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (crypto_shash_export(shash, &sha256))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha256.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (crypto_shash_export(shash, &sha512))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
                        *hash512_state_out = cpu_to_be64(*(sha512.state + i));
                break;
        default:
                return -EFAULT;
        }
        memzero_explicit(ipad, block_size);
        memzero_explicit(opad, block_size);
        return 0;
}

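/*
 * Initialize the request-header fields common to all lookaside (LA)
 * bulk requests: SGL pointer type, 16-byte IV field, no partial-packet
 * state and no additional protocol processing.
 */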
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
        header->hdr_flags =
                ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
        header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
        header->comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
                                            QAT_COMN_PTR_TYPE_SGL);
        ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
                                  ICP_QAT_FW_LA_PARTIAL_NONE);
        ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
        ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_NO_PROTO);
        ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
                                       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

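/*
 * Build the encrypt-direction content descriptor and request template.
 * The cipher block comes first with the hash block placed right after
 * the cipher key, and the slices are chained CIPHER -> AUTH, so the
 * digest covers the associated data and the ciphertext.
 */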
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
                                         int alg,
                                         struct crypto_authenc_keys *keys,
                                         int mode)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
        unsigned int digestsize = crypto_aead_authsize(aead_tfm);
        struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
        struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
        struct icp_qat_hw_auth_algo_blk *hash =
                (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
                sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

        /* CD setup */
        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                             ctx->qat_hash_alg, digestsize);
        hash->sha.inner_setup.auth_counter.counter =
                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
                return -EFAULT;

        /* Request setup */
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_RET_AUTH_RES);
        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
        cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

        /* Cipher CD config setup */
        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cipher_cd_ctrl->cipher_cfg_offset = 0;
        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                hash_cd_ctrl->inner_state1_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
                hash_cd_ctrl->inner_state2_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        default:
                break;
        }
        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
        return 0;
}

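/*
 * Build the decrypt-direction content descriptor and request template.
 * Here the hash block comes first and the cipher block follows the
 * padded HMAC state; the slices are chained AUTH -> CIPHER and the
 * firmware compares the digest (CMP_AUTH_RES) instead of returning it.
 */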
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
                                         int alg,
                                         struct crypto_authenc_keys *keys,
                                         int mode)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
        unsigned int digestsize = crypto_aead_authsize(aead_tfm);
        struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
        struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
        struct icp_qat_hw_cipher_algo_blk *cipher =
                (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
                sizeof(struct icp_qat_hw_auth_setup) +
                roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
        struct icp_qat_fw_la_auth_req_params *auth_param =
                (struct icp_qat_fw_la_auth_req_params *)
                ((char *)&req_tmpl->serv_specif_rqpars +
                sizeof(struct icp_qat_fw_la_cipher_req_params));

        /* CD setup */
        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                             ctx->qat_hash_alg,
                                             digestsize);
        hash->sha.inner_setup.auth_counter.counter =
                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
                return -EFAULT;

        /* Request setup */
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_CMP_AUTH_RES);
        cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

        /* Cipher CD config setup */
        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cipher_cd_ctrl->cipher_cfg_offset =
                (sizeof(struct icp_qat_hw_auth_setup) +
                 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = 0;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                hash_cd_ctrl->inner_state1_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
                hash_cd_ctrl->inner_state2_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        default:
                break;
        }

        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
        auth_param->auth_res_sz = digestsize;
        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        return 0;
}

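/*
 * Common content-descriptor and request-template setup for the
 * ablkcipher algorithms; the enc/dec variants below differ only in the
 * cipher config word. Note the CTR special case in the dec path: CTR
 * decryption is the same keystream XOR as encryption, so the decrypt
 * descriptor reuses the encrypt config.
 */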
static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
                                        struct icp_qat_fw_la_bulk_req *req,
                                        struct icp_qat_hw_cipher_algo_blk *cd,
                                        const uint8_t *key, unsigned int keylen)
{
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

        memcpy(cd->aes.key, key, keylen);
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
        cd_pars->u.s.content_desc_params_sz =
                                sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
        /* Cipher CD config setup */
        cd_ctrl->cipher_key_sz = keylen >> 3;
        cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cd_ctrl->cipher_cfg_offset = 0;
        ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
                                        int alg, const uint8_t *key,
                                        unsigned int keylen, int mode)
{
        struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
        struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

        qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
        enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}

static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
                                        int alg, const uint8_t *key,
                                        unsigned int keylen, int mode)
{
        struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
        struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

        qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

        if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
                dec_cd->aes.cipher_config.val =
                                        QAT_AES_HW_CONFIG_DEC(alg, mode);
        else
                dec_cd->aes.cipher_config.val =
                                        QAT_AES_HW_CONFIG_ENC(alg, mode);
}

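/*
 * Map an AES key length to the hardware algorithm id. XTS keys carry
 * two AES keys back to back, so the accepted lengths double and there
 * is deliberately no 192-bit XTS variant.
 */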
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
        if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
                switch (key_len) {
                case AES_KEYSIZE_128:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                        break;
                case AES_KEYSIZE_192:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
                        break;
                case AES_KEYSIZE_256:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                        break;
                default:
                        return -EINVAL;
                }
        } else {
                switch (key_len) {
                case AES_KEYSIZE_128 << 1:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                        break;
                case AES_KEYSIZE_256 << 1:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                        break;
                default:
                        return -EINVAL;
                }
        }
        return 0;
}

static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
                                      unsigned int keylen, int mode)
{
        struct crypto_authenc_keys keys;
        int alg;

        if (crypto_authenc_extractkeys(&keys, key, keylen))
                goto bad_key;

        if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
                goto bad_key;

        if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
                goto error;

        if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
                goto error;

        return 0;
bad_key:
        crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
error:
        return -EFAULT;
}

static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
                                            const uint8_t *key,
                                            unsigned int keylen,
                                            int mode)
{
        int alg;

        if (qat_alg_validate_key(keylen, &alg, mode))
                goto bad_key;

        qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
        qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
        return 0;
bad_key:
        crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}

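/*
 * setkey for the authenc() AEADs. On first use this binds the tfm to a
 * crypto instance on the current node and allocates DMA-coherent
 * encrypt/decrypt content descriptors; on rekey the existing buffers
 * are cleared and rebuilt. CBC mode is hardcoded here because every
 * registered AEAD is an authenc(hmac(shaN),cbc(aes)) template.
 */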
static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
                               unsigned int keylen)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev;

        if (ctx->enc_cd) {
                /* rekeying */
                dev = &GET_DEV(ctx->inst->accel_dev);
                memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
                memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
                memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
                memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
        } else {
                /* new key */
                int node = get_current_node();
                struct qat_crypto_instance *inst =
                                qat_crypto_get_instance_node(node);
                if (!inst)
                        return -EINVAL;

                dev = &GET_DEV(inst->accel_dev);
                ctx->inst = inst;
                ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
                                                  &ctx->enc_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->enc_cd)
                        return -ENOMEM;
                ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
                                                  &ctx->dec_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->dec_cd)
                        goto out_free_enc;
        }
        if (qat_alg_aead_init_sessions(tfm, key, keylen,
                                       ICP_QAT_HW_CIPHER_CBC_MODE))
                goto out_free_all;

        return 0;

out_free_all:
        memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                          ctx->dec_cd, ctx->dec_cd_paddr);
        ctx->dec_cd = NULL;
out_free_enc:
        memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                          ctx->enc_cd, ctx->enc_cd_paddr);
        ctx->enc_cd = NULL;
        return -ENOMEM;
}

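/*
 * Undo qat_alg_sgl_to_bufl(): unmap every data buffer and the flat
 * buffer lists themselves, handling the out-of-place case where a
 * separate destination list was allocated.
 */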
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
                              struct qat_crypto_request *qat_req)
{
        struct device *dev = &GET_DEV(inst->accel_dev);
        struct qat_alg_buf_list *bl = qat_req->buf.bl;
        struct qat_alg_buf_list *blout = qat_req->buf.blout;
        dma_addr_t blp = qat_req->buf.blp;
        dma_addr_t blpout = qat_req->buf.bloutp;
        size_t sz = qat_req->buf.sz;
        size_t sz_out = qat_req->buf.sz_out;
        int i;

        for (i = 0; i < bl->num_bufs; i++)
                dma_unmap_single(dev, bl->bufers[i].addr,
                                 bl->bufers[i].len, DMA_BIDIRECTIONAL);

        dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
        kfree(bl);
        if (blp != blpout) {
                /* For an out-of-place operation, unmap only the mapped
                 * data buffers */
                int bufless = blout->num_bufs - blout->num_mapped_bufs;

                for (i = bufless; i < blout->num_bufs; i++) {
                        dma_unmap_single(dev, blout->bufers[i].addr,
                                         blout->bufers[i].len,
                                         DMA_BIDIRECTIONAL);
                }
                dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
                kfree(blout);
        }
}

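/*
 * Translate the source (and, for out-of-place requests, destination)
 * scatterlists into the flat qat_alg_buf_list layout the firmware
 * consumes, DMA-mapping each segment. Zero-length entries are skipped,
 * so num_bufs may end up smaller than sg_nents().
 */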
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                               struct scatterlist *sgl,
                               struct scatterlist *sglout,
                               struct qat_crypto_request *qat_req)
{
        struct device *dev = &GET_DEV(inst->accel_dev);
        int i, sg_nctr = 0;
        int n = sg_nents(sgl);
        struct qat_alg_buf_list *bufl;
        struct qat_alg_buf_list *buflout = NULL;
        dma_addr_t blp;
        dma_addr_t bloutp = 0;
        struct scatterlist *sg;
        size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
                        ((1 + n) * sizeof(struct qat_alg_buf));

        if (unlikely(!n))
                return -EINVAL;

        bufl = kzalloc_node(sz, GFP_ATOMIC,
                            dev_to_node(&GET_DEV(inst->accel_dev)));
        if (unlikely(!bufl))
                return -ENOMEM;

        blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, blp)))
                goto err_in;

        for_each_sg(sgl, sg, n, i) {
                int y = sg_nctr;

                if (!sg->length)
                        continue;

                bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
                                                      sg->length,
                                                      DMA_BIDIRECTIONAL);
                bufl->bufers[y].len = sg->length;
                if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
                        goto err_in;
                sg_nctr++;
        }
        bufl->num_bufs = sg_nctr;
        qat_req->buf.bl = bufl;
        qat_req->buf.blp = blp;
        qat_req->buf.sz = sz;
        /* Handle out of place operation */
        if (sgl != sglout) {
                struct qat_alg_buf *bufers;

                n = sg_nents(sglout);
                sz_out = sizeof(struct qat_alg_buf_list) +
                        ((1 + n) * sizeof(struct qat_alg_buf));
                sg_nctr = 0;
                buflout = kzalloc_node(sz_out, GFP_ATOMIC,
                                       dev_to_node(&GET_DEV(inst->accel_dev)));
                if (unlikely(!buflout))
                        goto err_in;
                bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, bloutp)))
                        goto err_out;
                bufers = buflout->bufers;
                for_each_sg(sglout, sg, n, i) {
                        int y = sg_nctr;

                        if (!sg->length)
                                continue;

                        bufers[y].addr = dma_map_single(dev, sg_virt(sg),
                                                        sg->length,
                                                        DMA_BIDIRECTIONAL);
                        if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
                                goto err_out;
                        bufers[y].len = sg->length;
                        sg_nctr++;
                }
                buflout->num_bufs = sg_nctr;
                buflout->num_mapped_bufs = sg_nctr;
                qat_req->buf.blout = buflout;
                qat_req->buf.bloutp = bloutp;
                qat_req->buf.sz_out = sz_out;
        } else {
                /* Otherwise set the src and dst to the same address */
                qat_req->buf.bloutp = qat_req->buf.blp;
                qat_req->buf.sz_out = 0;
        }
        return 0;

err_out:
        n = sg_nents(sglout);
        for (i = 0; i < n; i++)
                if (!dma_mapping_error(dev, buflout->bufers[i].addr))
                        dma_unmap_single(dev, buflout->bufers[i].addr,
                                         buflout->bufers[i].len,
                                         DMA_BIDIRECTIONAL);
        if (!dma_mapping_error(dev, bloutp))
                dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
        kfree(buflout);

err_in:
        n = sg_nents(sgl);
        for (i = 0; i < n; i++)
                if (!dma_mapping_error(dev, bufl->bufers[i].addr))
                        dma_unmap_single(dev, bufl->bufers[i].addr,
                                         bufl->bufers[i].len,
                                         DMA_BIDIRECTIONAL);

        if (!dma_mapping_error(dev, blp))
                dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
        kfree(bufl);

        dev_err(dev, "Failed to map buf for dma\n");
        return -ENOMEM;
}

static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
                                  struct qat_crypto_request *qat_req)
{
        struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
        struct qat_crypto_instance *inst = ctx->inst;
        struct aead_request *areq = qat_req->aead_req;
        uint8_t stat_field = qat_resp->comn_resp.comn_status;
        int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

        qat_alg_free_bufl(inst, qat_req);
        if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
                res = -EBADMSG;
        areq->base.complete(&areq->base, res);
}

static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
                                        struct qat_crypto_request *qat_req)
{
        struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
        struct qat_crypto_instance *inst = ctx->inst;
        struct ablkcipher_request *areq = qat_req->ablkcipher_req;
        uint8_t stat_field = qat_resp->comn_resp.comn_status;
        int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

        qat_alg_free_bufl(inst, qat_req);
        if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
                res = -EINVAL;
        areq->base.complete(&areq->base, res);
}

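/*
 * Ring callback: the request pointer made the firmware round trip in
 * opaque_data and is dispatched to the completion handler that was set
 * at submit time.
 */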
void qat_alg_callback(void *resp)
{
        struct icp_qat_fw_la_resp *qat_resp = resp;
        struct qat_crypto_request *qat_req =
                                (void *)(__force long)qat_resp->opaque_data;

        qat_req->cb(qat_resp, qat_req);
}

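/*
 * Submit an AEAD decrypt. The authentication tag is excluded from the
 * cipher length, and the remaining ciphertext must be AES-block
 * aligned (CBC only). Submission retries up to ten times while the
 * ring returns -EAGAIN before giving up with -EBUSY.
 */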
static int qat_alg_aead_dec(struct aead_request *areq)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int digest_size = crypto_aead_authsize(aead_tfm);
        int ret, ctr = 0;
        u32 cipher_len;

        cipher_len = areq->cryptlen - digest_size;
        if (cipher_len % AES_BLOCK_SIZE != 0)
                return -EINVAL;

        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->dec_fw_req;
        qat_req->aead_ctx = ctx;
        qat_req->aead_req = areq;
        qat_req->cb = qat_aead_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = cipher_len;
        cipher_param->cipher_offset = areq->assoclen;
        memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
        auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
        auth_param->auth_off = 0;
        auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
        uint8_t *iv = areq->iv;
        int ret, ctr = 0;

        if (areq->cryptlen % AES_BLOCK_SIZE != 0)
                return -EINVAL;

        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->enc_fw_req;
        qat_req->aead_ctx = ctx;
        qat_req->aead_req = areq;
        qat_req->cb = qat_aead_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

        memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
        cipher_param->cipher_length = areq->cryptlen;
        cipher_param->cipher_offset = areq->assoclen;

        auth_param->auth_off = 0;
        auth_param->auth_len = areq->assoclen + areq->cryptlen;

        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

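/*
 * setkey for the plain block-cipher modes: the same instance binding
 * and DMA descriptor management as the AEAD path, but serialized with
 * the ctx spinlock.
 */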
static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
                                     const u8 *key, unsigned int keylen,
                                     int mode)
{
        struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        struct device *dev;

        spin_lock(&ctx->lock);
        if (ctx->enc_cd) {
                /* rekeying */
                dev = &GET_DEV(ctx->inst->accel_dev);
                memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
                memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
                memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
                memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
        } else {
                /* new key */
                int node = get_current_node();
                struct qat_crypto_instance *inst =
                                qat_crypto_get_instance_node(node);
                if (!inst) {
                        spin_unlock(&ctx->lock);
                        return -EINVAL;
                }

                dev = &GET_DEV(inst->accel_dev);
                ctx->inst = inst;
                ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
                                                  &ctx->enc_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->enc_cd) {
                        spin_unlock(&ctx->lock);
                        return -ENOMEM;
                }
                ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
                                                  &ctx->dec_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->dec_cd) {
                        spin_unlock(&ctx->lock);
                        goto out_free_enc;
                }
        }
        spin_unlock(&ctx->lock);
        if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
                goto out_free_all;

        return 0;

out_free_all:
        memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
        dma_free_coherent(dev, sizeof(*ctx->dec_cd),
                          ctx->dec_cd, ctx->dec_cd_paddr);
        ctx->dec_cd = NULL;
out_free_enc:
        memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
        dma_free_coherent(dev, sizeof(*ctx->enc_cd),
                          ctx->enc_cd, ctx->enc_cd_paddr);
        ctx->enc_cd = NULL;
        return -ENOMEM;
}

static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
                                         const u8 *key, unsigned int keylen)
{
        return qat_alg_ablkcipher_setkey(tfm, key, keylen,
                                         ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
                                         const u8 *key, unsigned int keylen)
{
        return qat_alg_ablkcipher_setkey(tfm, key, keylen,
                                         ICP_QAT_HW_CIPHER_CTR_MODE);
}

static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
                                         const u8 *key, unsigned int keylen)
{
        return qat_alg_ablkcipher_setkey(tfm, key, keylen,
                                         ICP_QAT_HW_CIPHER_XTS_MODE);
}

static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int ret, ctr = 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->enc_fw_req;
        qat_req->ablkcipher_ctx = ctx;
        qat_req->ablkcipher_req = req;
        qat_req->cb = qat_ablkcipher_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = req->nbytes;
        cipher_param->cipher_offset = 0;
        memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int ret, ctr = 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->dec_fw_req;
        qat_req->ablkcipher_ctx = ctx;
        qat_req->ablkcipher_req = req;
        qat_req->cb = qat_ablkcipher_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = req->nbytes;
        cipher_param->cipher_offset = 0;
        memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_aead_init(struct crypto_aead *tfm,
                             enum icp_qat_hw_auth_algo hash,
                             const char *hash_name)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

        ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
        if (IS_ERR(ctx->hash_tfm))
                return PTR_ERR(ctx->hash_tfm);
        ctx->qat_hash_alg = hash;
        crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
        return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev;

        crypto_free_shash(ctx->hash_tfm);

        if (!inst)
                return;

        dev = &GET_DEV(inst->accel_dev);
        if (ctx->enc_cd) {
                memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                                  ctx->enc_cd, ctx->enc_cd_paddr);
        }
        if (ctx->dec_cd) {
                memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                                  ctx->dec_cd, ctx->dec_cd_paddr);
        }
        qat_crypto_put_instance(inst);
}

static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        spin_lock_init(&ctx->lock);
        tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
        ctx->tfm = tfm;
        return 0;
}

static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
{
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev;

        if (!inst)
                return;

        dev = &GET_DEV(inst->accel_dev);
        if (ctx->enc_cd) {
                memset(ctx->enc_cd, 0,
                       sizeof(struct icp_qat_hw_cipher_algo_blk));
                dma_free_coherent(dev,
                                  sizeof(struct icp_qat_hw_cipher_algo_blk),
                                  ctx->enc_cd, ctx->enc_cd_paddr);
        }
        if (ctx->dec_cd) {
                memset(ctx->dec_cd, 0,
                       sizeof(struct icp_qat_hw_cipher_algo_blk));
                dma_free_coherent(dev,
                                  sizeof(struct icp_qat_hw_cipher_algo_blk),
                                  ctx->dec_cd, ctx->dec_cd_paddr);
        }
        qat_crypto_put_instance(inst);
}

static struct aead_alg qat_aeads[] = { {
        .base = {
                .cra_name = "authenc(hmac(sha1),cbc(aes))",
                .cra_driver_name = "qat_aes_cbc_hmac_sha1",
                .cra_priority = 4001,
                .cra_flags = CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
                .cra_module = THIS_MODULE,
        },
        .init = qat_alg_aead_sha1_init,
        .exit = qat_alg_aead_exit,
        .setkey = qat_alg_aead_setkey,
        .decrypt = qat_alg_aead_dec,
        .encrypt = qat_alg_aead_enc,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA1_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha256),cbc(aes))",
                .cra_driver_name = "qat_aes_cbc_hmac_sha256",
                .cra_priority = 4001,
                .cra_flags = CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
                .cra_module = THIS_MODULE,
        },
        .init = qat_alg_aead_sha256_init,
        .exit = qat_alg_aead_exit,
        .setkey = qat_alg_aead_setkey,
        .decrypt = qat_alg_aead_dec,
        .encrypt = qat_alg_aead_enc,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA256_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha512),cbc(aes))",
                .cra_driver_name = "qat_aes_cbc_hmac_sha512",
                .cra_priority = 4001,
                .cra_flags = CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
                .cra_module = THIS_MODULE,
        },
        .init = qat_alg_aead_sha512_init,
        .exit = qat_alg_aead_exit,
        .setkey = qat_alg_aead_setkey,
        .decrypt = qat_alg_aead_dec,
        .encrypt = qat_alg_aead_enc,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA512_DIGEST_SIZE,
} };

static struct crypto_alg qat_algs[] = { {
        .cra_name = "cbc(aes)",
        .cra_driver_name = "qat_aes_cbc",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_ablkcipher_init,
        .cra_exit = qat_alg_ablkcipher_exit,
        .cra_u = {
                .ablkcipher = {
                        .setkey = qat_alg_ablkcipher_cbc_setkey,
                        .decrypt = qat_alg_ablkcipher_decrypt,
                        .encrypt = qat_alg_ablkcipher_encrypt,
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
}, {
        .cra_name = "ctr(aes)",
        .cra_driver_name = "qat_aes_ctr",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_ablkcipher_init,
        .cra_exit = qat_alg_ablkcipher_exit,
        .cra_u = {
                .ablkcipher = {
                        .setkey = qat_alg_ablkcipher_ctr_setkey,
                        .decrypt = qat_alg_ablkcipher_decrypt,
                        .encrypt = qat_alg_ablkcipher_encrypt,
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
}, {
        .cra_name = "xts(aes)",
        .cra_driver_name = "qat_aes_xts",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_ablkcipher_init,
        .cra_exit = qat_alg_ablkcipher_exit,
        .cra_u = {
                .ablkcipher = {
                        .setkey = qat_alg_ablkcipher_xts_setkey,
                        .decrypt = qat_alg_ablkcipher_decrypt,
                        .encrypt = qat_alg_ablkcipher_encrypt,
                        .min_keysize = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize = 2 * AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
} };

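/*
 * Algorithm registration is reference counted across accelerator
 * devices: only the first device to come up registers the algorithms
 * with the crypto API, and only the last one to go away unregisters
 * them.
 */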
int qat_algs_register(void)
{
        int ret = 0, i;

        mutex_lock(&algs_lock);
        if (++active_devs != 1)
                goto unlock;

        for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
                qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                        CRYPTO_ALG_ASYNC;

        ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
        if (ret)
                goto unlock;

        for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
                qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;

        ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
        if (ret)
                goto unreg_algs;

unlock:
        mutex_unlock(&algs_lock);
        return ret;

unreg_algs:
        crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
        goto unlock;
}

void qat_algs_unregister(void)
{
        mutex_lock(&algs_lock);
        if (--active_devs != 0)
                goto unlock;

        crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
        crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));

unlock:
        mutex_unlock(&algs_lock);
}