/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 * Copyright(c) 2014 Intel Corporation.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 * Copyright(c) 2014 Intel Corporation.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
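
/*
 * Flat buffer list handed to the firmware in place of a scatterlist;
 * bufers[] holds one DMA address/length pair per mapped entry.
 */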
struct qat_alg_buf {
	uint32_t len;
	uint32_t resrvd;
	uint64_t addr;
} __packed;

struct qat_alg_buf_list {
	uint64_t resrvd;
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);
/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);

struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
};

struct qat_alg_ablkcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	spinlock_t lock;	/* protects qat_alg_ablkcipher_ctx struct */
};
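
/* Return the intermediate (state1) hash state size for a QAT hash algorithm */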
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
}
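
/*
 * Precompute the partial HMAC states for a session: hash the ipad and
 * opad blocks derived from the authentication key, then export the
 * intermediate digests into the content descriptor so the firmware can
 * resume them for every request instead of rehashing the key each time.
 */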
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	char ipad[block_size];
	char opad[block_size];
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;

	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ipad);
		if (ret)
			return ret;

		memcpy(opad, ipad, digest_size);
	} else {
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ipad, block_size);
	memzero_explicit(opad, block_size);
	return 0;
}
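
/* Set up the request header fields common to all lookaside (LA) requests */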
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
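
/*
 * Build the encrypt-direction content descriptor and request template:
 * cipher config and key first, then the hash setup, with the slices
 * chained cipher -> auth (encrypt, then authenticate the result).
 */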
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}
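
/*
 * Build the decrypt-direction content descriptor and request template:
 * the hash block leads so the digest is verified before decryption,
 * with the slices chained auth -> cipher (the reverse of the encrypt chain).
 */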
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}
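
/* Request and content descriptor setup shared by both ablkcipher directions */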
static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
					struct icp_qat_fw_la_bulk_req *req,
					struct icp_qat_hw_cipher_algo_blk *cd,
					const uint8_t *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}

static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

	if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_DEC(alg, mode);
	else
		/* CTR decrypt reuses the encrypt configuration */
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_ENC(alg, mode);
}
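
/*
 * Map an AES key length onto the hardware algorithm id; XTS keys are
 * twice the nominal size because they carry two AES keys.
 */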
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
	if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
		switch (key_len) {
		case AES_KEYSIZE_128:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_192:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
			break;
		case AES_KEYSIZE_256:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (key_len) {
		case AES_KEYSIZE_128 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_256 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
		goto error;

	if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
		goto error;

	memzero_explicit(&keys, sizeof(keys));
	return 0;
bad_key:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
error:
	memzero_explicit(&keys, sizeof(keys));
	return -EFAULT;
}

static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
					    const u8 *key,
					    unsigned int keylen,
					    int mode)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg, mode))
		goto bad_key;

	qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
	qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev;

	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst)
			return -EINVAL;

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd)
			return -ENOMEM;

		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd)
			goto out_free_enc;
	}
	if (qat_alg_aead_init_sessions(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CBC_MODE))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}
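
/* Unmap and free the buffer lists built by qat_alg_sgl_to_bufl() */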
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;
	int i;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* For an out-of-place operation, unmap only the data buffers */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
		kfree(blout);
	}
}
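
/*
 * Flatten the source and destination scatterlists into the buffer
 * lists the firmware expects, DMA-mapping each entry. For in-place
 * requests the output list simply aliases the input list.
 */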
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
			((1 + n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err_in;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err_in;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		n = sg_nents(sglout);
		sz_out = sizeof(struct qat_alg_buf_list) +
			((1 + n) * sizeof(struct qat_alg_buf));
		sg_nctr = 0;
		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
			goto err_in;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err_out;
		bufers = buflout->bufers;
		for_each_sg(sglout, sg, n, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err_out;
			bufers[y].len = sg->length;
			sg_nctr++;
		}
		buflout->num_bufs = sg_nctr;
		buflout->num_mapped_bufs = sg_nctr;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
		qat_req->buf.sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
		qat_req->buf.sz_out = 0;
	}
	return 0;

err_out:
	n = sg_nents(sglout);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
			dma_unmap_single(dev, buflout->bufers[i].addr,
					 buflout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
	if (!dma_mapping_error(dev, bloutp))
		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
	kfree(buflout);

err_in:
	n = sg_nents(sgl);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);

	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);

	dev_err(dev, "Failed to map buf for dma\n");
	return -ENOMEM;
}
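
/* Completion handlers, dispatched via qat_req->cb from qat_alg_callback() */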
static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}

static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
					struct qat_crypto_request *qat_req)
{
	struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct ablkcipher_request *areq = qat_req->ablkcipher_req;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;
	areq->base.complete(&areq->base, res);
}

void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
				(void *)(__force long)qat_resp->opaque_data;

	qat_req->cb(qat_resp, qat_req);
}
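
/*
 * Request paths: copy the per-session request template, point it at the
 * mapped buffers, fill in the per-request cipher/auth parameters and
 * post the message to the instance's sym_tx ring, backing off briefly
 * if the ring is full.
 */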
static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_authsize(aead_tfm);
	int ret, ctr = 0;
	u32 cipher_len;

	cipher_len = areq->cryptlen - digst_size;
	if (cipher_len % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = cipher_len;
	cipher_param->cipher_offset = areq->assoclen;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	uint8_t *iv = areq->iv;
	int ret, ctr = 0;

	if (areq->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
	cipher_param->cipher_length = areq->cryptlen;
	cipher_param->cipher_offset = areq->assoclen;

	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
				     const u8 *key, unsigned int keylen,
				     int mode)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}

static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CTR_MODE);
}

static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_XTS_MODE);
}
static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
static int qat_alg_aead_init(struct crypto_aead *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return PTR_ERR(ctx->hash_tfm);
	ctx->qat_hash_alg = hash;
	crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);
	tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
	ctx->tfm = tfm;
	return 0;
}

static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}
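
/*
 * Algorithm definitions registered with the crypto API; priority 4001
 * ranks these hardware implementations above the software ones.
 */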
static struct aead_alg qat_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha1_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha256_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha512_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
} };

static struct crypto_alg qat_algs[] = { {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "qat_aes_cbc",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_cbc_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
}, {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "qat_aes_ctr",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_ctr_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
}, {
	.cra_name = "xts(aes)",
	.cra_driver_name = "qat_aes_xts",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_xts_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
} };
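
/*
 * Registration is reference counted across accelerator devices: the
 * algorithms are registered when the first device comes up and
 * unregistered when the last one goes away.
 */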
int qat_algs_register(void)
{
	int ret = 0, i;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
		qat_algs[i].cra_flags =
			CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;

	ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
	if (ret)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
		qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;

	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	if (ret)
		goto unreg_algs;

unlock:
	mutex_unlock(&algs_lock);
	return ret;

unreg_algs:
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
	goto unlock;
}

void qat_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));

unlock:
	mutex_unlock(&algs_lock);
}