GNU Linux-libre 4.4.284-gnu1
drivers/crypto/qat/qat_common/qat_algs.c
1 /*
2   This file is provided under a dual BSD/GPLv2 license.  When using or
3   redistributing this file, you may do so under either license.
4
5   GPL LICENSE SUMMARY
6   Copyright(c) 2014 Intel Corporation.
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of version 2 of the GNU General Public License as
9   published by the Free Software Foundation.
10
11   This program is distributed in the hope that it will be useful, but
12   WITHOUT ANY WARRANTY; without even the implied warranty of
13   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14   General Public License for more details.
15
16   Contact Information:
17   qat-linux@intel.com
18
19   BSD LICENSE
20   Copyright(c) 2014 Intel Corporation.
21   Redistribution and use in source and binary forms, with or without
22   modification, are permitted provided that the following conditions
23   are met:
24
25     * Redistributions of source code must retain the above copyright
26       notice, this list of conditions and the following disclaimer.
27     * Redistributions in binary form must reproduce the above copyright
28       notice, this list of conditions and the following disclaimer in
29       the documentation and/or other materials provided with the
30       distribution.
31     * Neither the name of Intel Corporation nor the names of its
32       contributors may be used to endorse or promote products derived
33       from this software without specific prior written permission.
34
35   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 */
47 #include <linux/module.h>
48 #include <linux/slab.h>
49 #include <linux/crypto.h>
50 #include <crypto/internal/aead.h>
51 #include <crypto/aes.h>
52 #include <crypto/sha.h>
53 #include <crypto/hash.h>
54 #include <crypto/algapi.h>
55 #include <crypto/authenc.h>
56 #include <linux/dma-mapping.h>
57 #include "adf_accel_devices.h"
58 #include "adf_transport.h"
59 #include "adf_common_drv.h"
60 #include "qat_crypto.h"
61 #include "icp_qat_hw.h"
62 #include "icp_qat_fw.h"
63 #include "icp_qat_fw_la.h"
64
65 #define QAT_AES_HW_CONFIG_ENC(alg, mode) \
66         ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
67                                        ICP_QAT_HW_CIPHER_NO_CONVERT, \
68                                        ICP_QAT_HW_CIPHER_ENCRYPT)
69
70 #define QAT_AES_HW_CONFIG_DEC(alg, mode) \
71         ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
72                                        ICP_QAT_HW_CIPHER_KEY_CONVERT, \
73                                        ICP_QAT_HW_CIPHER_DECRYPT)
74
75 static DEFINE_MUTEX(algs_lock);
76 static unsigned int active_devs;
77
78 struct qat_alg_buf {
79         uint32_t len;
80         uint32_t resrvd;
81         uint64_t addr;
82 } __packed;
83
84 struct qat_alg_buf_list {
85         uint64_t resrvd;
86         uint32_t num_bufs;
87         uint32_t num_mapped_bufs;
88         struct qat_alg_buf bufers[];
89 } __packed __aligned(64);
90
91 /* Common content descriptor */
92 struct qat_alg_cd {
93         union {
94                 struct qat_enc { /* Encrypt content desc */
95                         struct icp_qat_hw_cipher_algo_blk cipher;
96                         struct icp_qat_hw_auth_algo_blk hash;
97                 } qat_enc_cd;
98                 struct qat_dec { /* Decrypt content desc */
99                         struct icp_qat_hw_auth_algo_blk hash;
100                         struct icp_qat_hw_cipher_algo_blk cipher;
101                 } qat_dec_cd;
102         };
103 } __aligned(64);
104
105 struct qat_alg_aead_ctx {
106         struct qat_alg_cd *enc_cd;
107         struct qat_alg_cd *dec_cd;
108         dma_addr_t enc_cd_paddr;
109         dma_addr_t dec_cd_paddr;
110         struct icp_qat_fw_la_bulk_req enc_fw_req;
111         struct icp_qat_fw_la_bulk_req dec_fw_req;
112         struct crypto_shash *hash_tfm;
113         enum icp_qat_hw_auth_algo qat_hash_alg;
114         struct qat_crypto_instance *inst;
115 };
116
117 struct qat_alg_ablkcipher_ctx {
118         struct icp_qat_hw_cipher_algo_blk *enc_cd;
119         struct icp_qat_hw_cipher_algo_blk *dec_cd;
120         dma_addr_t enc_cd_paddr;
121         dma_addr_t dec_cd_paddr;
122         struct icp_qat_fw_la_bulk_req enc_fw_req;
123         struct icp_qat_fw_la_bulk_req dec_fw_req;
124         struct qat_crypto_instance *inst;
125         struct crypto_tfm *tfm;
126         spinlock_t lock;        /* protects qat_alg_ablkcipher_ctx struct */
127 };
128
129 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
130 {
131         switch (qat_hash_alg) {
132         case ICP_QAT_HW_AUTH_ALGO_SHA1:
133                 return ICP_QAT_HW_SHA1_STATE1_SZ;
134         case ICP_QAT_HW_AUTH_ALGO_SHA256:
135                 return ICP_QAT_HW_SHA256_STATE1_SZ;
136         case ICP_QAT_HW_AUTH_ALGO_SHA512:
137                 return ICP_QAT_HW_SHA512_STATE1_SZ;
138         default:
139                 return -EFAULT;
140         }
141         return -EFAULT;
142 }
143
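/*
 * Precompute the HMAC ipad/opad partial digests for the session's auth key
 * and store them, byte-swapped to big-endian, in the hardware auth setup
 * block (state1). Keys longer than the hash block size are digested first,
 * as HMAC (RFC 2104) requires.
 */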
144 static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
145                                   struct qat_alg_aead_ctx *ctx,
146                                   const uint8_t *auth_key,
147                                   unsigned int auth_keylen)
148 {
149         SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
150         struct sha1_state sha1;
151         struct sha256_state sha256;
152         struct sha512_state sha512;
153         int block_size = crypto_shash_blocksize(ctx->hash_tfm);
154         int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
155         char ipad[block_size];
156         char opad[block_size];
157         __be32 *hash_state_out;
158         __be64 *hash512_state_out;
159         int i, offset;
160
161         memset(ipad, 0, block_size);
162         memset(opad, 0, block_size);
163         shash->tfm = ctx->hash_tfm;
164         shash->flags = 0x0;
165
166         if (auth_keylen > block_size) {
167                 int ret = crypto_shash_digest(shash, auth_key,
168                                               auth_keylen, ipad);
169                 if (ret)
170                         return ret;
171
172                 memcpy(opad, ipad, digest_size);
173         } else {
174                 memcpy(ipad, auth_key, auth_keylen);
175                 memcpy(opad, auth_key, auth_keylen);
176         }
177
178         for (i = 0; i < block_size; i++) {
179                 char *ipad_ptr = ipad + i;
180                 char *opad_ptr = opad + i;
181                 *ipad_ptr ^= 0x36;
182                 *opad_ptr ^= 0x5C;
183         }
184
185         if (crypto_shash_init(shash))
186                 return -EFAULT;
187
188         if (crypto_shash_update(shash, ipad, block_size))
189                 return -EFAULT;
190
191         hash_state_out = (__be32 *)hash->sha.state1;
192         hash512_state_out = (__be64 *)hash_state_out;
193
194         switch (ctx->qat_hash_alg) {
195         case ICP_QAT_HW_AUTH_ALGO_SHA1:
196                 if (crypto_shash_export(shash, &sha1))
197                         return -EFAULT;
198                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
199                         *hash_state_out = cpu_to_be32(*(sha1.state + i));
200                 break;
201         case ICP_QAT_HW_AUTH_ALGO_SHA256:
202                 if (crypto_shash_export(shash, &sha256))
203                         return -EFAULT;
204                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
205                         *hash_state_out = cpu_to_be32(*(sha256.state + i));
206                 break;
207         case ICP_QAT_HW_AUTH_ALGO_SHA512:
208                 if (crypto_shash_export(shash, &sha512))
209                         return -EFAULT;
210                 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
211                         *hash512_state_out = cpu_to_be64(*(sha512.state + i));
212                 break;
213         default:
214                 return -EFAULT;
215         }
216
217         if (crypto_shash_init(shash))
218                 return -EFAULT;
219
220         if (crypto_shash_update(shash, opad, block_size))
221                 return -EFAULT;
222
223         offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
224         hash_state_out = (__be32 *)(hash->sha.state1 + offset);
225         hash512_state_out = (__be64 *)hash_state_out;
226
227         switch (ctx->qat_hash_alg) {
228         case ICP_QAT_HW_AUTH_ALGO_SHA1:
229                 if (crypto_shash_export(shash, &sha1))
230                         return -EFAULT;
231                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
232                         *hash_state_out = cpu_to_be32(*(sha1.state + i));
233                 break;
234         case ICP_QAT_HW_AUTH_ALGO_SHA256:
235                 if (crypto_shash_export(shash, &sha256))
236                         return -EFAULT;
237                 for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
238                         *hash_state_out = cpu_to_be32(*(sha256.state + i));
239                 break;
240         case ICP_QAT_HW_AUTH_ALGO_SHA512:
241                 if (crypto_shash_export(shash, &sha512))
242                         return -EFAULT;
243                 for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
244                         *hash512_state_out = cpu_to_be64(*(sha512.state + i));
245                 break;
246         default:
247                 return -EFAULT;
248         }
249         memzero_explicit(ipad, block_size);
250         memzero_explicit(opad, block_size);
251         return 0;
252 }
253
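/*
 * Fill in the request header fields common to all lookaside (LA) requests:
 * valid flags, SGL pointer type, 16-byte IV field, no partial processing,
 * no protocol and no state updates.
 */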
254 static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
255 {
256         header->hdr_flags =
257                 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
258         header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
259         header->comn_req_flags =
260                 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
261                                             QAT_COMN_PTR_TYPE_SGL);
262         ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
263                                   ICP_QAT_FW_LA_PARTIAL_NONE);
264         ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
265                                            ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
266         ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
267                                 ICP_QAT_FW_LA_NO_PROTO);
268         ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
269                                        ICP_QAT_FW_LA_NO_UPDATE_STATE);
270 }
271
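/*
 * Build the encrypt-direction content descriptor (cipher block followed by
 * the auth block) and the CIPHER_HASH firmware request template for an
 * authenc session; the digest is returned in the data buffer
 * (DIGEST_IN_BUFFER).
 */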
272 static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
273                                          int alg,
274                                          struct crypto_authenc_keys *keys,
275                                          int mode)
276 {
277         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
278         unsigned int digestsize = crypto_aead_authsize(aead_tfm);
279         struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
280         struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
281         struct icp_qat_hw_auth_algo_blk *hash =
282                 (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
283                 sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
284         struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
285         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
286         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
287         void *ptr = &req_tmpl->cd_ctrl;
288         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
289         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
290
291         /* CD setup */
292         cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
293         memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
294         hash->sha.inner_setup.auth_config.config =
295                 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
296                                              ctx->qat_hash_alg, digestsize);
297         hash->sha.inner_setup.auth_counter.counter =
298                 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
299
300         if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
301                 return -EFAULT;
302
303         /* Request setup */
304         qat_alg_init_common_hdr(header);
305         header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
306         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
307                                            ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
308         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
309                                    ICP_QAT_FW_LA_RET_AUTH_RES);
310         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
311                                    ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
312         cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
313         cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
314
315         /* Cipher CD config setup */
316         cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
317         cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
318         cipher_cd_ctrl->cipher_cfg_offset = 0;
319         ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
320         ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
321         /* Auth CD config setup */
322         hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
323         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
324         hash_cd_ctrl->inner_res_sz = digestsize;
325         hash_cd_ctrl->final_sz = digestsize;
326
327         switch (ctx->qat_hash_alg) {
328         case ICP_QAT_HW_AUTH_ALGO_SHA1:
329                 hash_cd_ctrl->inner_state1_sz =
330                         round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
331                 hash_cd_ctrl->inner_state2_sz =
332                         round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
333                 break;
334         case ICP_QAT_HW_AUTH_ALGO_SHA256:
335                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
336                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
337                 break;
338         case ICP_QAT_HW_AUTH_ALGO_SHA512:
339                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
340                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
341                 break;
342         default:
343                 break;
344         }
345         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
346                         ((sizeof(struct icp_qat_hw_auth_setup) +
347                          round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
348         ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
349         ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
350         return 0;
351 }
352
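/*
 * Build the decrypt-direction content descriptor (auth block followed by
 * the cipher block) and the HASH_CIPHER firmware request template; the
 * hardware verifies the auth tag itself (CMP_AUTH_RES).
 */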
353 static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
354                                          int alg,
355                                          struct crypto_authenc_keys *keys,
356                                          int mode)
357 {
358         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
359         unsigned int digestsize = crypto_aead_authsize(aead_tfm);
360         struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
361         struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
362         struct icp_qat_hw_cipher_algo_blk *cipher =
363                 (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
364                 sizeof(struct icp_qat_hw_auth_setup) +
365                 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
366         struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
367         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
368         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
369         void *ptr = &req_tmpl->cd_ctrl;
370         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
371         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
372         struct icp_qat_fw_la_auth_req_params *auth_param =
373                 (struct icp_qat_fw_la_auth_req_params *)
374                 ((char *)&req_tmpl->serv_specif_rqpars +
375                 sizeof(struct icp_qat_fw_la_cipher_req_params));
376
377         /* CD setup */
378         cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
379         memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
380         hash->sha.inner_setup.auth_config.config =
381                 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
382                                              ctx->qat_hash_alg,
383                                              digestsize);
384         hash->sha.inner_setup.auth_counter.counter =
385                 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
386
387         if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
388                 return -EFAULT;
389
390         /* Request setup */
391         qat_alg_init_common_hdr(header);
392         header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
393         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
394                                            ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
395         ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
396                                    ICP_QAT_FW_LA_NO_RET_AUTH_RES);
397         ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
398                                    ICP_QAT_FW_LA_CMP_AUTH_RES);
399         cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
400         cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
401
402         /* Cipher CD config setup */
403         cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
404         cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
405         cipher_cd_ctrl->cipher_cfg_offset =
406                 (sizeof(struct icp_qat_hw_auth_setup) +
407                  roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
408         ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
409         ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
410
411         /* Auth CD config setup */
412         hash_cd_ctrl->hash_cfg_offset = 0;
413         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
414         hash_cd_ctrl->inner_res_sz = digestsize;
415         hash_cd_ctrl->final_sz = digestsize;
416
417         switch (ctx->qat_hash_alg) {
418         case ICP_QAT_HW_AUTH_ALGO_SHA1:
419                 hash_cd_ctrl->inner_state1_sz =
420                         round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
421                 hash_cd_ctrl->inner_state2_sz =
422                         round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
423                 break;
424         case ICP_QAT_HW_AUTH_ALGO_SHA256:
425                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
426                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
427                 break;
428         case ICP_QAT_HW_AUTH_ALGO_SHA512:
429                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
430                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
431                 break;
432         default:
433                 break;
434         }
435
436         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
437                         ((sizeof(struct icp_qat_hw_auth_setup) +
438                          round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
439         auth_param->auth_res_sz = digestsize;
440         ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
441         ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
442         return 0;
443 }
444
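/*
 * Common setup shared by the encrypt and decrypt cipher-only sessions:
 * copy the key into the content descriptor and initialize the request
 * header and cipher CD control fields.
 */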
445 static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
446                                         struct icp_qat_fw_la_bulk_req *req,
447                                         struct icp_qat_hw_cipher_algo_blk *cd,
448                                         const uint8_t *key, unsigned int keylen)
449 {
450         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
451         struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
452         struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
453
454         memcpy(cd->aes.key, key, keylen);
455         qat_alg_init_common_hdr(header);
456         header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
457         cd_pars->u.s.content_desc_params_sz =
458                                 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
459         /* Cipher CD config setup */
460         cd_ctrl->cipher_key_sz = keylen >> 3;
461         cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
462         cd_ctrl->cipher_cfg_offset = 0;
463         ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
464         ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
465 }
466
467 static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
468                                         int alg, const uint8_t *key,
469                                         unsigned int keylen, int mode)
470 {
471         struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
472         struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
473         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
474
475         qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
476         cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
477         enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
478 }
479
480 static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
481                                         int alg, const uint8_t *key,
482                                         unsigned int keylen, int mode)
483 {
484         struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
485         struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
486         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
487
488         qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
489         cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
490
491         if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
492                 dec_cd->aes.cipher_config.val =
493                                         QAT_AES_HW_CONFIG_DEC(alg, mode);
494         else
495                 dec_cd->aes.cipher_config.val =
496                                         QAT_AES_HW_CONFIG_ENC(alg, mode);
497 }
498
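/*
 * Map the supplied key length onto a QAT AES algorithm id. XTS mode takes
 * a double-length key (two AES keys), so the valid lengths are doubled.
 */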
499 static int qat_alg_validate_key(int key_len, int *alg, int mode)
500 {
501         if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
502                 switch (key_len) {
503                 case AES_KEYSIZE_128:
504                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
505                         break;
506                 case AES_KEYSIZE_192:
507                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
508                         break;
509                 case AES_KEYSIZE_256:
510                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
511                         break;
512                 default:
513                         return -EINVAL;
514                 }
515         } else {
516                 switch (key_len) {
517                 case AES_KEYSIZE_128 << 1:
518                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
519                         break;
520                 case AES_KEYSIZE_256 << 1:
521                         *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
522                         break;
523                 default:
524                         return -EINVAL;
525                 }
526         }
527         return 0;
528 }
529
530 static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
531                                       unsigned int keylen,  int mode)
532 {
533         struct crypto_authenc_keys keys;
534         int alg;
535
536         if (crypto_authenc_extractkeys(&keys, key, keylen))
537                 goto bad_key;
538
539         if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
540                 goto bad_key;
541
542         if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
543                 goto error;
544
545         if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
546                 goto error;
547
548         return 0;
549 bad_key:
550         crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
551         return -EINVAL;
552 error:
553         return -EFAULT;
554 }
555
556 static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
557                                             const uint8_t *key,
558                                             unsigned int keylen,
559                                             int mode)
560 {
561         int alg;
562
563         if (qat_alg_validate_key(keylen, &alg, mode))
564                 goto bad_key;
565
566         qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
567         qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
568         return 0;
569 bad_key:
570         crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
571         return -EINVAL;
572 }
573
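/*
 * AEAD setkey: on first use pick a crypto instance and allocate the
 * DMA-coherent content descriptors, on rekey just clear the old state,
 * then program the encrypt and decrypt sessions for AES-CBC + HMAC.
 */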
574 static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
575                                unsigned int keylen)
576 {
577         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
578         struct device *dev;
579
580         if (ctx->enc_cd) {
581                 /* rekeying */
582                 dev = &GET_DEV(ctx->inst->accel_dev);
583                 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
584                 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
585                 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
586                 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
587         } else {
588                 /* new key */
589                 int node = get_current_node();
590                 struct qat_crypto_instance *inst =
591                                 qat_crypto_get_instance_node(node);
592                 if (!inst) {
593                         return -EINVAL;
594                 }
595
596                 dev = &GET_DEV(inst->accel_dev);
597                 ctx->inst = inst;
598                 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
599                                                   &ctx->enc_cd_paddr,
600                                                   GFP_ATOMIC);
601                 if (!ctx->enc_cd) {
602                         return -ENOMEM;
603                 }
604                 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
605                                                   &ctx->dec_cd_paddr,
606                                                   GFP_ATOMIC);
607                 if (!ctx->dec_cd) {
608                         goto out_free_enc;
609                 }
610         }
611         if (qat_alg_aead_init_sessions(tfm, key, keylen,
612                                        ICP_QAT_HW_CIPHER_CBC_MODE))
613                 goto out_free_all;
614
615         return 0;
616
617 out_free_all:
618         memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
619         dma_free_coherent(dev, sizeof(struct qat_alg_cd),
620                           ctx->dec_cd, ctx->dec_cd_paddr);
621         ctx->dec_cd = NULL;
622 out_free_enc:
623         memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
624         dma_free_coherent(dev, sizeof(struct qat_alg_cd),
625                           ctx->enc_cd, ctx->enc_cd_paddr);
626         ctx->enc_cd = NULL;
627         return -ENOMEM;
628 }
629
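/*
 * Unmap and free the source (and, for out-of-place requests, destination)
 * buffer lists built by qat_alg_sgl_to_bufl().
 */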
630 static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
631                               struct qat_crypto_request *qat_req)
632 {
633         struct device *dev = &GET_DEV(inst->accel_dev);
634         struct qat_alg_buf_list *bl = qat_req->buf.bl;
635         struct qat_alg_buf_list *blout = qat_req->buf.blout;
636         dma_addr_t blp = qat_req->buf.blp;
637         dma_addr_t blpout = qat_req->buf.bloutp;
638         size_t sz = qat_req->buf.sz;
639         size_t sz_out = qat_req->buf.sz_out;
640         int i;
641
642         for (i = 0; i < bl->num_bufs; i++)
643                 dma_unmap_single(dev, bl->bufers[i].addr,
644                                  bl->bufers[i].len, DMA_BIDIRECTIONAL);
645
646         dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
647         kfree(bl);
648         if (blp != blpout) {
649                 /* Out-of-place operation: DMA unmap only the mapped data buffers */
650                 int bufless = blout->num_bufs - blout->num_mapped_bufs;
651
652                 for (i = bufless; i < blout->num_bufs; i++) {
653                         dma_unmap_single(dev, blout->bufers[i].addr,
654                                          blout->bufers[i].len,
655                                          DMA_BIDIRECTIONAL);
656                 }
657                 dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
658                 kfree(blout);
659         }
660 }
661
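/*
 * Convert the source/destination scatterlists into the flat buffer-list
 * format expected by the firmware and DMA-map both the data buffers and
 * the lists themselves. For in-place requests src and dst share one list.
 */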
662 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
663                                struct scatterlist *sgl,
664                                struct scatterlist *sglout,
665                                struct qat_crypto_request *qat_req)
666 {
667         struct device *dev = &GET_DEV(inst->accel_dev);
668         int i, sg_nctr = 0;
669         int n = sg_nents(sgl);
670         struct qat_alg_buf_list *bufl;
671         struct qat_alg_buf_list *buflout = NULL;
672         dma_addr_t blp;
673         dma_addr_t bloutp = 0;
674         struct scatterlist *sg;
675         size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
676                         ((1 + n) * sizeof(struct qat_alg_buf));
677
678         if (unlikely(!n))
679                 return -EINVAL;
680
681         bufl = kzalloc_node(sz, GFP_ATOMIC,
682                             dev_to_node(&GET_DEV(inst->accel_dev)));
683         if (unlikely(!bufl))
684                 return -ENOMEM;
685
686         blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
687         if (unlikely(dma_mapping_error(dev, blp)))
688                 goto err;
689
690         for_each_sg(sgl, sg, n, i) {
691                 int y = sg_nctr;
692
693                 if (!sg->length)
694                         continue;
695
696                 bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
697                                                       sg->length,
698                                                       DMA_BIDIRECTIONAL);
699                 bufl->bufers[y].len = sg->length;
700                 if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
701                         goto err;
702                 sg_nctr++;
703         }
704         bufl->num_bufs = sg_nctr;
705         qat_req->buf.bl = bufl;
706         qat_req->buf.blp = blp;
707         qat_req->buf.sz = sz;
708         /* Handle out of place operation */
709         if (sgl != sglout) {
710                 struct qat_alg_buf *bufers;
711
712                 n = sg_nents(sglout);
713                 sz_out = sizeof(struct qat_alg_buf_list) +
714                         ((1 + n) * sizeof(struct qat_alg_buf));
715                 sg_nctr = 0;
716                 buflout = kzalloc_node(sz_out, GFP_ATOMIC,
717                                        dev_to_node(&GET_DEV(inst->accel_dev)));
718                 if (unlikely(!buflout))
719                         goto err;
720                 bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
721                 if (unlikely(dma_mapping_error(dev, bloutp)))
722                         goto err;
723                 bufers = buflout->bufers;
724                 for_each_sg(sglout, sg, n, i) {
725                         int y = sg_nctr;
726
727                         if (!sg->length)
728                                 continue;
729
730                         bufers[y].addr = dma_map_single(dev, sg_virt(sg),
731                                                         sg->length,
732                                                         DMA_BIDIRECTIONAL);
733                         if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
734                                 goto err;
735                         bufers[y].len = sg->length;
736                         sg_nctr++;
737                 }
738                 buflout->num_bufs = sg_nctr;
739                 buflout->num_mapped_bufs = sg_nctr;
740                 qat_req->buf.blout = buflout;
741                 qat_req->buf.bloutp = bloutp;
742                 qat_req->buf.sz_out = sz_out;
743         } else {
744                 /* Otherwise set the src and dst to the same address */
745                 qat_req->buf.bloutp = qat_req->buf.blp;
746                 qat_req->buf.sz_out = 0;
747         }
748         return 0;
749 err:
750         dev_err(dev, "Failed to map buf for dma\n");
751         sg_nctr = 0;
752         for (i = 0; i < n; i++)
753                 if (!dma_mapping_error(dev, bufl->bufers[i].addr))
754                         dma_unmap_single(dev, bufl->bufers[i].addr,
755                                          bufl->bufers[i].len,
756                                          DMA_BIDIRECTIONAL);
757
758         if (!dma_mapping_error(dev, blp))
759                 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
760         kfree(bufl);
761         if (sgl != sglout && buflout) {
762                 n = sg_nents(sglout);
763                 for (i = 0; i < n; i++)
764                         if (!dma_mapping_error(dev, buflout->bufers[i].addr))
765                                 dma_unmap_single(dev, buflout->bufers[i].addr,
766                                                  buflout->bufers[i].len,
767                                                  DMA_BIDIRECTIONAL);
768                 if (!dma_mapping_error(dev, bloutp))
769                         dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
770                 kfree(buflout);
771         }
772         return -ENOMEM;
773 }
774
775 static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
776                                   struct qat_crypto_request *qat_req)
777 {
778         struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
779         struct qat_crypto_instance *inst = ctx->inst;
780         struct aead_request *areq = qat_req->aead_req;
781         uint8_t stat_field = qat_resp->comn_resp.comn_status;
782         int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
783
784         qat_alg_free_bufl(inst, qat_req);
785         if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
786                 res = -EBADMSG;
787         areq->base.complete(&areq->base, res);
788 }
789
790 static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
791                                         struct qat_crypto_request *qat_req)
792 {
793         struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
794         struct qat_crypto_instance *inst = ctx->inst;
795         struct ablkcipher_request *areq = qat_req->ablkcipher_req;
796         uint8_t stat_field = qat_resp->comn_resp.comn_status;
797         int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
798
799         qat_alg_free_bufl(inst, qat_req);
800         if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
801                 res = -EINVAL;
802         areq->base.complete(&areq->base, res);
803 }
804
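/*
 * Response ring callback: recover the originating request from the opaque
 * data field and dispatch to its per-request completion handler.
 */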
805 void qat_alg_callback(void *resp)
806 {
807         struct icp_qat_fw_la_resp *qat_resp = resp;
808         struct qat_crypto_request *qat_req =
809                                 (void *)(__force long)qat_resp->opaque_data;
810
811         qat_req->cb(qat_resp, qat_req);
812 }
813
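/*
 * AEAD decrypt: the ciphertext (cryptlen minus the auth tag) must be a
 * whole number of AES blocks. Build the buffer lists, fill in a request
 * from the decrypt template and post it to the sym tx ring, retrying a
 * few times on -EAGAIN.
 */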
814 static int qat_alg_aead_dec(struct aead_request *areq)
815 {
816         struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
817         struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
818         struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
819         struct qat_crypto_request *qat_req = aead_request_ctx(areq);
820         struct icp_qat_fw_la_cipher_req_params *cipher_param;
821         struct icp_qat_fw_la_auth_req_params *auth_param;
822         struct icp_qat_fw_la_bulk_req *msg;
823         int digst_size = crypto_aead_authsize(aead_tfm);
824         int ret, ctr = 0;
825         u32 cipher_len;
826
827         cipher_len = areq->cryptlen - digst_size;
828         if (cipher_len % AES_BLOCK_SIZE != 0)
829                 return -EINVAL;
830
831         ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
832         if (unlikely(ret))
833                 return ret;
834
835         msg = &qat_req->req;
836         *msg = ctx->dec_fw_req;
837         qat_req->aead_ctx = ctx;
838         qat_req->aead_req = areq;
839         qat_req->cb = qat_aead_alg_callback;
840         qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
841         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
842         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
843         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
844         cipher_param->cipher_length = cipher_len;
845         cipher_param->cipher_offset = areq->assoclen;
846         memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
847         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
848         auth_param->auth_off = 0;
849         auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
850         do {
851                 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
852         } while (ret == -EAGAIN && ctr++ < 10);
853
854         if (ret == -EAGAIN) {
855                 qat_alg_free_bufl(ctx->inst, qat_req);
856                 return -EBUSY;
857         }
858         return -EINPROGRESS;
859 }
860
861 static int qat_alg_aead_enc(struct aead_request *areq)
862 {
863         struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
864         struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
865         struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
866         struct qat_crypto_request *qat_req = aead_request_ctx(areq);
867         struct icp_qat_fw_la_cipher_req_params *cipher_param;
868         struct icp_qat_fw_la_auth_req_params *auth_param;
869         struct icp_qat_fw_la_bulk_req *msg;
870         uint8_t *iv = areq->iv;
871         int ret, ctr = 0;
872
873         if (areq->cryptlen % AES_BLOCK_SIZE != 0)
874                 return -EINVAL;
875
876         ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
877         if (unlikely(ret))
878                 return ret;
879
880         msg = &qat_req->req;
881         *msg = ctx->enc_fw_req;
882         qat_req->aead_ctx = ctx;
883         qat_req->aead_req = areq;
884         qat_req->cb = qat_aead_alg_callback;
885         qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
886         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
887         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
888         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
889         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
890
891         memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
892         cipher_param->cipher_length = areq->cryptlen;
893         cipher_param->cipher_offset = areq->assoclen;
894
895         auth_param->auth_off = 0;
896         auth_param->auth_len = areq->assoclen + areq->cryptlen;
897
898         do {
899                 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
900         } while (ret == -EAGAIN && ctr++ < 10);
901
902         if (ret == -EAGAIN) {
903                 qat_alg_free_bufl(ctx->inst, qat_req);
904                 return -EBUSY;
905         }
906         return -EINPROGRESS;
907 }
908
909 static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
910                                      const u8 *key, unsigned int keylen,
911                                      int mode)
912 {
913         struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
914         struct device *dev;
915
916         spin_lock(&ctx->lock);
917         if (ctx->enc_cd) {
918                 /* rekeying */
919                 dev = &GET_DEV(ctx->inst->accel_dev);
920                 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
921                 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
922                 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
923                 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
924         } else {
925                 /* new key */
926                 int node = get_current_node();
927                 struct qat_crypto_instance *inst =
928                                 qat_crypto_get_instance_node(node);
929                 if (!inst) {
930                         spin_unlock(&ctx->lock);
931                         return -EINVAL;
932                 }
933
934                 dev = &GET_DEV(inst->accel_dev);
935                 ctx->inst = inst;
936                 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
937                                                   &ctx->enc_cd_paddr,
938                                                   GFP_ATOMIC);
939                 if (!ctx->enc_cd) {
940                         spin_unlock(&ctx->lock);
941                         return -ENOMEM;
942                 }
943                 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
944                                                   &ctx->dec_cd_paddr,
945                                                   GFP_ATOMIC);
946                 if (!ctx->dec_cd) {
947                         spin_unlock(&ctx->lock);
948                         goto out_free_enc;
949                 }
950         }
951         spin_unlock(&ctx->lock);
952         if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
953                 goto out_free_all;
954
955         return 0;
956
957 out_free_all:
958         memset(ctx->dec_cd, 0, sizeof(*ctx->enc_cd));
959         dma_free_coherent(dev, sizeof(*ctx->enc_cd),
960                           ctx->dec_cd, ctx->dec_cd_paddr);
961         ctx->dec_cd = NULL;
962 out_free_enc:
963         memset(ctx->enc_cd, 0, sizeof(*ctx->dec_cd));
964         dma_free_coherent(dev, sizeof(*ctx->dec_cd),
965                           ctx->enc_cd, ctx->enc_cd_paddr);
966         ctx->enc_cd = NULL;
967         return -ENOMEM;
968 }
969
970 static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
971                                          const u8 *key, unsigned int keylen)
972 {
973         return qat_alg_ablkcipher_setkey(tfm, key, keylen,
974                                          ICP_QAT_HW_CIPHER_CBC_MODE);
975 }
976
977 static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
978                                          const u8 *key, unsigned int keylen)
979 {
980         return qat_alg_ablkcipher_setkey(tfm, key, keylen,
981                                          ICP_QAT_HW_CIPHER_CTR_MODE);
982 }
983
984 static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
985                                          const u8 *key, unsigned int keylen)
986 {
987         return qat_alg_ablkcipher_setkey(tfm, key, keylen,
988                                          ICP_QAT_HW_CIPHER_XTS_MODE);
989 }
990
991 static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
992 {
993         struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
994         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
995         struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
996         struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
997         struct icp_qat_fw_la_cipher_req_params *cipher_param;
998         struct icp_qat_fw_la_bulk_req *msg;
999         int ret, ctr = 0;
1000
1001         ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1002         if (unlikely(ret))
1003                 return ret;
1004
1005         msg = &qat_req->req;
1006         *msg = ctx->enc_fw_req;
1007         qat_req->ablkcipher_ctx = ctx;
1008         qat_req->ablkcipher_req = req;
1009         qat_req->cb = qat_ablkcipher_alg_callback;
1010         qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1011         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1012         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1013         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1014         cipher_param->cipher_length = req->nbytes;
1015         cipher_param->cipher_offset = 0;
1016         memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1017         do {
1018                 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1019         } while (ret == -EAGAIN && ctr++ < 10);
1020
1021         if (ret == -EAGAIN) {
1022                 qat_alg_free_bufl(ctx->inst, qat_req);
1023                 return -EBUSY;
1024         }
1025         return -EINPROGRESS;
1026 }
1027
1028 static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
1029 {
1030         struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
1031         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
1032         struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1033         struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
1034         struct icp_qat_fw_la_cipher_req_params *cipher_param;
1035         struct icp_qat_fw_la_bulk_req *msg;
1036         int ret, ctr = 0;
1037
1038         ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
1039         if (unlikely(ret))
1040                 return ret;
1041
1042         msg = &qat_req->req;
1043         *msg = ctx->dec_fw_req;
1044         qat_req->ablkcipher_ctx = ctx;
1045         qat_req->ablkcipher_req = req;
1046         qat_req->cb = qat_ablkcipher_alg_callback;
1047         qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
1048         qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
1049         qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
1050         cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
1051         cipher_param->cipher_length = req->nbytes;
1052         cipher_param->cipher_offset = 0;
1053         memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
1054         do {
1055                 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
1056         } while (ret == -EAGAIN && ctr++ < 10);
1057
1058         if (ret == -EAGAIN) {
1059                 qat_alg_free_bufl(ctx->inst, qat_req);
1060                 return -EBUSY;
1061         }
1062         return -EINPROGRESS;
1063 }
1064
1065 static int qat_alg_aead_init(struct crypto_aead *tfm,
1066                              enum icp_qat_hw_auth_algo hash,
1067                              const char *hash_name)
1068 {
1069         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1070
1071         ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
1072         if (IS_ERR(ctx->hash_tfm))
1073                 return PTR_ERR(ctx->hash_tfm);
1074         ctx->qat_hash_alg = hash;
1075         crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
1076                                      sizeof(struct qat_crypto_request));
1077         return 0;
1078 }
1079
1080 static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
1081 {
1082         return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
1083 }
1084
1085 static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
1086 {
1087         return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
1088 }
1089
1090 static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
1091 {
1092         return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
1093 }
1094
1095 static void qat_alg_aead_exit(struct crypto_aead *tfm)
1096 {
1097         struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
1098         struct qat_crypto_instance *inst = ctx->inst;
1099         struct device *dev;
1100
1101         crypto_free_shash(ctx->hash_tfm);
1102
1103         if (!inst)
1104                 return;
1105
1106         dev = &GET_DEV(inst->accel_dev);
1107         if (ctx->enc_cd) {
1108                 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
1109                 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1110                                   ctx->enc_cd, ctx->enc_cd_paddr);
1111         }
1112         if (ctx->dec_cd) {
1113                 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
1114                 dma_free_coherent(dev, sizeof(struct qat_alg_cd),
1115                                   ctx->dec_cd, ctx->dec_cd_paddr);
1116         }
1117         qat_crypto_put_instance(inst);
1118 }
1119
1120 static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
1121 {
1122         struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1123
1124         spin_lock_init(&ctx->lock);
1125         tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
1126                                         sizeof(struct qat_crypto_request);
1127         ctx->tfm = tfm;
1128         return 0;
1129 }
1130
1131 static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
1132 {
1133         struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
1134         struct qat_crypto_instance *inst = ctx->inst;
1135         struct device *dev;
1136
1137         if (!inst)
1138                 return;
1139
1140         dev = &GET_DEV(inst->accel_dev);
1141         if (ctx->enc_cd) {
1142                 memset(ctx->enc_cd, 0,
1143                        sizeof(struct icp_qat_hw_cipher_algo_blk));
1144                 dma_free_coherent(dev,
1145                                   sizeof(struct icp_qat_hw_cipher_algo_blk),
1146                                   ctx->enc_cd, ctx->enc_cd_paddr);
1147         }
1148         if (ctx->dec_cd) {
1149                 memset(ctx->dec_cd, 0,
1150                        sizeof(struct icp_qat_hw_cipher_algo_blk));
1151                 dma_free_coherent(dev,
1152                                   sizeof(struct icp_qat_hw_cipher_algo_blk),
1153                                   ctx->dec_cd, ctx->dec_cd_paddr);
1154         }
1155         qat_crypto_put_instance(inst);
1156 }
1157
1158
1159 static struct aead_alg qat_aeads[] = { {
1160         .base = {
1161                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1162                 .cra_driver_name = "qat_aes_cbc_hmac_sha1",
1163                 .cra_priority = 4001,
1164                 .cra_flags = CRYPTO_ALG_ASYNC,
1165                 .cra_blocksize = AES_BLOCK_SIZE,
1166                 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1167                 .cra_module = THIS_MODULE,
1168         },
1169         .init = qat_alg_aead_sha1_init,
1170         .exit = qat_alg_aead_exit,
1171         .setkey = qat_alg_aead_setkey,
1172         .decrypt = qat_alg_aead_dec,
1173         .encrypt = qat_alg_aead_enc,
1174         .ivsize = AES_BLOCK_SIZE,
1175         .maxauthsize = SHA1_DIGEST_SIZE,
1176 }, {
1177         .base = {
1178                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1179                 .cra_driver_name = "qat_aes_cbc_hmac_sha256",
1180                 .cra_priority = 4001,
1181                 .cra_flags = CRYPTO_ALG_ASYNC,
1182                 .cra_blocksize = AES_BLOCK_SIZE,
1183                 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1184                 .cra_module = THIS_MODULE,
1185         },
1186         .init = qat_alg_aead_sha256_init,
1187         .exit = qat_alg_aead_exit,
1188         .setkey = qat_alg_aead_setkey,
1189         .decrypt = qat_alg_aead_dec,
1190         .encrypt = qat_alg_aead_enc,
1191         .ivsize = AES_BLOCK_SIZE,
1192         .maxauthsize = SHA256_DIGEST_SIZE,
1193 }, {
1194         .base = {
1195                 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1196                 .cra_driver_name = "qat_aes_cbc_hmac_sha512",
1197                 .cra_priority = 4001,
1198                 .cra_flags = CRYPTO_ALG_ASYNC,
1199                 .cra_blocksize = AES_BLOCK_SIZE,
1200                 .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
1201                 .cra_module = THIS_MODULE,
1202         },
1203         .init = qat_alg_aead_sha512_init,
1204         .exit = qat_alg_aead_exit,
1205         .setkey = qat_alg_aead_setkey,
1206         .decrypt = qat_alg_aead_dec,
1207         .encrypt = qat_alg_aead_enc,
1208         .ivsize = AES_BLOCK_SIZE,
1209         .maxauthsize = SHA512_DIGEST_SIZE,
1210 } };
1211
1212 static struct crypto_alg qat_algs[] = { {
1213         .cra_name = "cbc(aes)",
1214         .cra_driver_name = "qat_aes_cbc",
1215         .cra_priority = 4001,
1216         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1217         .cra_blocksize = AES_BLOCK_SIZE,
1218         .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1219         .cra_alignmask = 0,
1220         .cra_type = &crypto_ablkcipher_type,
1221         .cra_module = THIS_MODULE,
1222         .cra_init = qat_alg_ablkcipher_init,
1223         .cra_exit = qat_alg_ablkcipher_exit,
1224         .cra_u = {
1225                 .ablkcipher = {
1226                         .setkey = qat_alg_ablkcipher_cbc_setkey,
1227                         .decrypt = qat_alg_ablkcipher_decrypt,
1228                         .encrypt = qat_alg_ablkcipher_encrypt,
1229                         .min_keysize = AES_MIN_KEY_SIZE,
1230                         .max_keysize = AES_MAX_KEY_SIZE,
1231                         .ivsize = AES_BLOCK_SIZE,
1232                 },
1233         },
1234 }, {
1235         .cra_name = "ctr(aes)",
1236         .cra_driver_name = "qat_aes_ctr",
1237         .cra_priority = 4001,
1238         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1239         .cra_blocksize = AES_BLOCK_SIZE,
1240         .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1241         .cra_alignmask = 0,
1242         .cra_type = &crypto_ablkcipher_type,
1243         .cra_module = THIS_MODULE,
1244         .cra_init = qat_alg_ablkcipher_init,
1245         .cra_exit = qat_alg_ablkcipher_exit,
1246         .cra_u = {
1247                 .ablkcipher = {
1248                         .setkey = qat_alg_ablkcipher_ctr_setkey,
1249                         .decrypt = qat_alg_ablkcipher_decrypt,
1250                         .encrypt = qat_alg_ablkcipher_encrypt,
1251                         .min_keysize = AES_MIN_KEY_SIZE,
1252                         .max_keysize = AES_MAX_KEY_SIZE,
1253                         .ivsize = AES_BLOCK_SIZE,
1254                 },
1255         },
1256 }, {
1257         .cra_name = "xts(aes)",
1258         .cra_driver_name = "qat_aes_xts",
1259         .cra_priority = 4001,
1260         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1261         .cra_blocksize = AES_BLOCK_SIZE,
1262         .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
1263         .cra_alignmask = 0,
1264         .cra_type = &crypto_ablkcipher_type,
1265         .cra_module = THIS_MODULE,
1266         .cra_init = qat_alg_ablkcipher_init,
1267         .cra_exit = qat_alg_ablkcipher_exit,
1268         .cra_u = {
1269                 .ablkcipher = {
1270                         .setkey = qat_alg_ablkcipher_xts_setkey,
1271                         .decrypt = qat_alg_ablkcipher_decrypt,
1272                         .encrypt = qat_alg_ablkcipher_encrypt,
1273                         .min_keysize = 2 * AES_MIN_KEY_SIZE,
1274                         .max_keysize = 2 * AES_MAX_KEY_SIZE,
1275                         .ivsize = AES_BLOCK_SIZE,
1276                 },
1277         },
1278 } };
1279
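/*
 * Register the ablkcipher and aead algorithms with the crypto API when the
 * first accelerator device comes up; later devices only bump the reference
 * count held under algs_lock.
 */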
1280 int qat_algs_register(void)
1281 {
1282         int ret = 0, i;
1283
1284         mutex_lock(&algs_lock);
1285         if (++active_devs != 1)
1286                 goto unlock;
1287
1288         for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
1289                 qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
1290
1291         ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
1292         if (ret)
1293                 goto unlock;
1294
1295         for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
1296                 qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
1297
1298         ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1299         if (ret)
1300                 goto unreg_algs;
1301
1302 unlock:
1303         mutex_unlock(&algs_lock);
1304         return ret;
1305
1306 unreg_algs:
1307         crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1308         goto unlock;
1309 }
1310
1311 void qat_algs_unregister(void)
1312 {
1313         mutex_lock(&algs_lock);
1314         if (--active_devs != 0)
1315                 goto unlock;
1316
1317         crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
1318         crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1319
1320 unlock:
1321         mutex_unlock(&algs_lock);
1322 }