/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
                                       ICP_QAT_HW_CIPHER_NO_CONVERT, \
                                       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
                                       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
                                       ICP_QAT_HW_CIPHER_DECRYPT)
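
/*
 * Both macros build the cipher-config word that goes into the hardware
 * content descriptor. The encrypt side uses the AES key as supplied
 * (NO_CONVERT); the decrypt side asks the hardware to first convert the
 * key into its decryption form (KEY_CONVERT).
 */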
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

struct qat_alg_buf {
        uint32_t len;
        uint32_t resrvd;
        uint64_t addr;
} __packed;

struct qat_alg_buf_list {
        uint64_t resrvd;
        uint32_t num_bufs;
        uint32_t num_mapped_bufs;
        struct qat_alg_buf bufers[];
} __packed __aligned(64);
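
/*
 * The buffer list is consumed directly by the accelerator, hence the packed
 * layout and 64-byte alignment. num_mapped_bufs records how many destination
 * entries were actually DMA-mapped, so out-of-place requests can be unmapped
 * selectively on completion.
 */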
/* Common content descriptor */
struct qat_alg_cd {
        union {
                struct qat_enc { /* Encrypt content desc */
                        struct icp_qat_hw_cipher_algo_blk cipher;
                        struct icp_qat_hw_auth_algo_blk hash;
                } qat_enc_cd;
                struct qat_dec { /* Decrypt content desc */
                        struct icp_qat_hw_auth_algo_blk hash;
                        struct icp_qat_hw_cipher_algo_blk cipher;
                } qat_dec_cd;
        };
} __aligned(64);

struct qat_alg_aead_ctx {
        struct qat_alg_cd *enc_cd;
        struct qat_alg_cd *dec_cd;
        dma_addr_t enc_cd_paddr;
        dma_addr_t dec_cd_paddr;
        struct icp_qat_fw_la_bulk_req enc_fw_req;
        struct icp_qat_fw_la_bulk_req dec_fw_req;
        struct crypto_shash *hash_tfm;
        enum icp_qat_hw_auth_algo qat_hash_alg;
        struct qat_crypto_instance *inst;
};

struct qat_alg_ablkcipher_ctx {
        struct icp_qat_hw_cipher_algo_blk *enc_cd;
        struct icp_qat_hw_cipher_algo_blk *dec_cd;
        dma_addr_t enc_cd_paddr;
        dma_addr_t dec_cd_paddr;
        struct icp_qat_fw_la_bulk_req enc_fw_req;
        struct icp_qat_fw_la_bulk_req dec_fw_req;
        struct qat_crypto_instance *inst;
        struct crypto_tfm *tfm;
        spinlock_t lock;        /* protects qat_alg_ablkcipher_ctx struct */
};
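
/*
 * Both transform contexts keep a pair of DMA-coherent content descriptors
 * (one per direction) plus prebuilt firmware request templates, so the hot
 * path only has to copy a template and patch in per-request parameters.
 */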

static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return ICP_QAT_HW_SHA1_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return ICP_QAT_HW_SHA256_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return ICP_QAT_HW_SHA512_STATE1_SZ;
        default:
                return -EFAULT;
        }
}
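
/*
 * HMAC precompute: hash the ipad/opad-XORed key through one block of the
 * bare digest, export the partial states, and store them (byte-swapped to
 * big endian) in the content descriptor. The hardware can then resume the
 * inner and outer hashes per request instead of rehashing the key.
 */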
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
                                  struct qat_alg_aead_ctx *ctx,
                                  const uint8_t *auth_key,
                                  unsigned int auth_keylen)
{
        SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
        struct sha1_state sha1;
        struct sha256_state sha256;
        struct sha512_state sha512;
        int block_size = crypto_shash_blocksize(ctx->hash_tfm);
        int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
        char ipad[block_size];
        char opad[block_size];
        __be32 *hash_state_out;
        __be64 *hash512_state_out;
        int i, offset;

        memset(ipad, 0, block_size);
        memset(opad, 0, block_size);
        shash->tfm = ctx->hash_tfm;

        /* Keys longer than one block are first reduced to their digest */
        if (auth_keylen > block_size) {
                int ret = crypto_shash_digest(shash, auth_key,
                                              auth_keylen, ipad);
                if (ret)
                        return ret;

                memcpy(opad, ipad, digest_size);
        } else {
                memcpy(ipad, auth_key, auth_keylen);
                memcpy(opad, auth_key, auth_keylen);
        }

        for (i = 0; i < block_size; i++) {
                char *ipad_ptr = ipad + i;
                char *opad_ptr = opad + i;
                *ipad_ptr ^= HMAC_IPAD_VALUE;
                *opad_ptr ^= HMAC_OPAD_VALUE;
        }

        if (crypto_shash_init(shash))
                return -EFAULT;

        if (crypto_shash_update(shash, ipad, block_size))
                return -EFAULT;

        hash_state_out = (__be32 *)hash->sha.state1;
        hash512_state_out = (__be64 *)hash_state_out;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (crypto_shash_export(shash, &sha1))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha1.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (crypto_shash_export(shash, &sha256))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha256.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (crypto_shash_export(shash, &sha512))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
                        *hash512_state_out = cpu_to_be64(*(sha512.state + i));
                break;
        default:
                return -EFAULT;
        }

        if (crypto_shash_init(shash))
                return -EFAULT;

        if (crypto_shash_update(shash, opad, block_size))
                return -EFAULT;

        offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
        hash_state_out = (__be32 *)(hash->sha.state1 + offset);
        hash512_state_out = (__be64 *)hash_state_out;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (crypto_shash_export(shash, &sha1))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha1.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (crypto_shash_export(shash, &sha256))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha256.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (crypto_shash_export(shash, &sha512))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
                        *hash512_state_out = cpu_to_be64(*(sha512.state + i));
                break;
        default:
                return -EFAULT;
        }
        memzero_explicit(ipad, block_size);
        memzero_explicit(opad, block_size);
        return 0;
}

static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
        header->hdr_flags =
                ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
        header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
        header->comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
                                            QAT_COMN_PTR_TYPE_SGL);
        ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
                                  ICP_QAT_FW_LA_PARTIAL_NONE);
        ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
        ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_NO_PROTO);
        ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
                                       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
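
/*
 * Encrypt sessions run CIPHER then AUTH over the ciphertext
 * (encrypt-then-MAC, as authenc() requires), so the content descriptor
 * places the cipher config first and chains the cipher slice into the
 * auth slice.
 */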
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
                                         int alg,
                                         struct crypto_authenc_keys *keys,
                                         int mode)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
        unsigned int digestsize = crypto_aead_authsize(aead_tfm);
        struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
        struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
        struct icp_qat_hw_auth_algo_blk *hash =
                (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
                sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

        /* CD setup */
        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                             ctx->qat_hash_alg, digestsize);
        hash->sha.inner_setup.auth_counter.counter =
                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
                return -EFAULT;

        /* Request setup */
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_RET_AUTH_RES);
        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
        cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

        /* Cipher CD config setup */
        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cipher_cd_ctrl->cipher_cfg_offset = 0;
        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                hash_cd_ctrl->inner_state1_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
                hash_cd_ctrl->inner_state2_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        default:
                break;
        }
        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
        return 0;
}
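
/*
 * Decrypt sessions reverse the order: AUTH over the ciphertext first, then
 * CIPHER, with the firmware comparing the digest rather than returning it.
 * The content descriptor therefore starts with the hash block, and the
 * cipher config lives after the two rounded-up hash states.
 */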
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
                                         int alg,
                                         struct crypto_authenc_keys *keys,
                                         int mode)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
        unsigned int digestsize = crypto_aead_authsize(aead_tfm);
        struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
        struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
        struct icp_qat_hw_cipher_algo_blk *cipher =
                (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
                sizeof(struct icp_qat_hw_auth_setup) +
                roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
        struct icp_qat_fw_la_auth_req_params *auth_param =
                (struct icp_qat_fw_la_auth_req_params *)
                ((char *)&req_tmpl->serv_specif_rqpars +
                sizeof(struct icp_qat_fw_la_cipher_req_params));

        /* CD setup */
        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                             ctx->qat_hash_alg,
                                             digestsize);
        hash->sha.inner_setup.auth_counter.counter =
                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
                return -EFAULT;

        /* Request setup */
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_CMP_AUTH_RES);
        cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

        /* Cipher CD config setup */
        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cipher_cd_ctrl->cipher_cfg_offset =
                (sizeof(struct icp_qat_hw_auth_setup) +
                 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = 0;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                hash_cd_ctrl->inner_state1_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
                hash_cd_ctrl->inner_state2_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        default:
                break;
        }

        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
        auth_param->auth_res_sz = digestsize;
        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        return 0;
}
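
/*
 * The plain (non-AEAD) cipher path shares one template builder; only the
 * cipher-config word and the content-descriptor address differ between the
 * encrypt and decrypt variants below.
 */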
static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
                                        struct icp_qat_fw_la_bulk_req *req,
                                        struct icp_qat_hw_cipher_algo_blk *cd,
                                        const uint8_t *key, unsigned int keylen)
{
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

        memcpy(cd->aes.key, key, keylen);
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
        cd_pars->u.s.content_desc_params_sz =
                                sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
        /* Cipher CD config setup */
        cd_ctrl->cipher_key_sz = keylen >> 3;
        cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cd_ctrl->cipher_cfg_offset = 0;
        ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
                                        int alg, const uint8_t *key,
                                        unsigned int keylen, int mode)
{
        struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
        struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

        qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
        enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}

static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
                                        int alg, const uint8_t *key,
                                        unsigned int keylen, int mode)
{
        struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
        struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

        qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

        if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
                dec_cd->aes.cipher_config.val =
                                        QAT_AES_HW_CONFIG_DEC(alg, mode);
        else
                dec_cd->aes.cipher_config.val =
                                        QAT_AES_HW_CONFIG_ENC(alg, mode);
}
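
/*
 * CTR mode only ever runs the block cipher in the forward direction (the
 * keystream is XORed with the data either way), so its "decrypt" descriptor
 * deliberately reuses the encrypt configuration.
 */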
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
        if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
                switch (key_len) {
                case AES_KEYSIZE_128:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                        break;
                case AES_KEYSIZE_192:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
                        break;
                case AES_KEYSIZE_256:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                        break;
                default:
                        return -EINVAL;
                }
        } else {
                /* XTS keys are two concatenated AES keys */
                switch (key_len) {
                case AES_KEYSIZE_128 << 1:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                        break;
                case AES_KEYSIZE_256 << 1:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                        break;
                default:
                        return -EINVAL;
                }
        }
        return 0;
}

static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
                                      unsigned int keylen, int mode)
{
        struct crypto_authenc_keys keys;
        int alg;

        if (crypto_authenc_extractkeys(&keys, key, keylen))
                goto bad_key;

        if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
                goto bad_key;

        if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
                goto error;

        if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
                goto error;

        return 0;
bad_key:
        crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
error:
        return -EFAULT;
}

static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
                                            const u8 *key, unsigned int keylen,
                                            int mode)
{
        int alg;

        if (qat_alg_validate_key(keylen, &alg, mode))
                goto bad_key;

        qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
        qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
        return 0;
bad_key:
        crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}

static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
                               unsigned int keylen)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev;

        if (ctx->enc_cd) {
                /* rekeying */
                dev = &GET_DEV(ctx->inst->accel_dev);
                memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
                memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
                memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
                memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
        } else {
                /* new key */
                int node = get_current_node();
                struct qat_crypto_instance *inst =
                                qat_crypto_get_instance_node(node);
                if (!inst)
                        return -EINVAL;

                dev = &GET_DEV(inst->accel_dev);
                ctx->inst = inst;
                ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
                                                  &ctx->enc_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->enc_cd)
                        return -ENOMEM;

                ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
                                                  &ctx->dec_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->dec_cd)
                        goto out_free_enc;
        }
        if (qat_alg_aead_init_sessions(tfm, key, keylen,
                                       ICP_QAT_HW_CIPHER_CBC_MODE))
                goto out_free_all;

        return 0;

out_free_all:
        memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                          ctx->dec_cd, ctx->dec_cd_paddr);
        ctx->dec_cd = NULL;
out_free_enc:
        memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                          ctx->enc_cd, ctx->enc_cd_paddr);
        ctx->enc_cd = NULL;
        return -ENOMEM;
}
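
/*
 * Request buffers: each scatterlist is flattened into a qat_alg_buf_list
 * whose entries are DMA-mapped individually; the list itself is then mapped
 * so the firmware can walk it. qat_alg_free_bufl undoes both levels.
 */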
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
                              struct qat_crypto_request *qat_req)
{
        struct device *dev = &GET_DEV(inst->accel_dev);
        struct qat_alg_buf_list *bl = qat_req->buf.bl;
        struct qat_alg_buf_list *blout = qat_req->buf.blout;
        dma_addr_t blp = qat_req->buf.blp;
        dma_addr_t blpout = qat_req->buf.bloutp;
        size_t sz = qat_req->buf.sz;
        size_t sz_out = qat_req->buf.sz_out;
        int i;

        for (i = 0; i < bl->num_bufs; i++)
                dma_unmap_single(dev, bl->bufers[i].addr,
                                 bl->bufers[i].len, DMA_BIDIRECTIONAL);

        dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
        kfree(bl);
        if (blp != blpout) {
                /* For out-of-place operations, unmap only the data buffers */
                int bufless = blout->num_bufs - blout->num_mapped_bufs;

                for (i = bufless; i < blout->num_bufs; i++) {
                        dma_unmap_single(dev, blout->bufers[i].addr,
                                         blout->bufers[i].len,
                                         DMA_BIDIRECTIONAL);
                }
                dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
                kfree(blout);
        }
}

static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                               struct scatterlist *sgl,
                               struct scatterlist *sglout,
                               struct qat_crypto_request *qat_req)
{
        struct device *dev = &GET_DEV(inst->accel_dev);
        int i, sg_nctr = 0;
        int n = sg_nents(sgl);
        struct qat_alg_buf_list *bufl;
        struct qat_alg_buf_list *buflout = NULL;
        dma_addr_t blp;
        dma_addr_t bloutp = 0;
        struct scatterlist *sg;
        size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
                        ((1 + n) * sizeof(struct qat_alg_buf));

        if (unlikely(!n))
                return -EINVAL;

        bufl = kzalloc_node(sz, GFP_ATOMIC,
                            dev_to_node(&GET_DEV(inst->accel_dev)));
        if (unlikely(!bufl))
                return -ENOMEM;

        blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, blp)))
                goto err_in;

        for_each_sg(sgl, sg, n, i) {
                int y = sg_nctr;

                if (!sg->length)
                        continue;

                bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
                                                      sg->length,
                                                      DMA_BIDIRECTIONAL);
                bufl->bufers[y].len = sg->length;
                if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
                        goto err_in;
                sg_nctr++;
        }
        bufl->num_bufs = sg_nctr;
        qat_req->buf.bl = bufl;
        qat_req->buf.blp = blp;
        qat_req->buf.sz = sz;
        /* Handle out of place operation */
        if (sgl != sglout) {
                struct qat_alg_buf *bufers;

                n = sg_nents(sglout);
                sz_out = sizeof(struct qat_alg_buf_list) +
                        ((1 + n) * sizeof(struct qat_alg_buf));
                sg_nctr = 0;
                buflout = kzalloc_node(sz_out, GFP_ATOMIC,
                                       dev_to_node(&GET_DEV(inst->accel_dev)));
                if (unlikely(!buflout))
                        goto err_in;
                bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, bloutp)))
                        goto err_out;
                bufers = buflout->bufers;
                for_each_sg(sglout, sg, n, i) {
                        int y = sg_nctr;

                        if (!sg->length)
                                continue;

                        bufers[y].addr = dma_map_single(dev, sg_virt(sg),
                                                        sg->length,
                                                        DMA_BIDIRECTIONAL);
                        if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
                                goto err_out;
                        bufers[y].len = sg->length;
                        sg_nctr++;
                }
                buflout->num_bufs = sg_nctr;
                buflout->num_mapped_bufs = sg_nctr;
                qat_req->buf.blout = buflout;
                qat_req->buf.bloutp = bloutp;
                qat_req->buf.sz_out = sz_out;
        } else {
                /* Otherwise set the src and dst to the same address */
                qat_req->buf.bloutp = qat_req->buf.blp;
                qat_req->buf.sz_out = 0;
        }
        return 0;

err_out:
        n = sg_nents(sglout);
        for (i = 0; i < n; i++)
                if (!dma_mapping_error(dev, buflout->bufers[i].addr))
                        dma_unmap_single(dev, buflout->bufers[i].addr,
                                         buflout->bufers[i].len,
                                         DMA_BIDIRECTIONAL);
        if (!dma_mapping_error(dev, bloutp))
                dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
        kfree(buflout);

err_in:
        n = sg_nents(sgl);
        for (i = 0; i < n; i++)
                if (!dma_mapping_error(dev, bufl->bufers[i].addr))
                        dma_unmap_single(dev, bufl->bufers[i].addr,
                                         bufl->bufers[i].len,
                                         DMA_BIDIRECTIONAL);

        if (!dma_mapping_error(dev, blp))
                dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
        kfree(bufl);

        dev_err(dev, "Failed to map buf for dma\n");
        return -ENOMEM;
}
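
/*
 * Completion callbacks, invoked from the response ring handler. They unmap
 * the request buffers, translate the firmware status word into an errno and
 * complete the crypto API request.
 */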
static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
                                  struct qat_crypto_request *qat_req)
{
        struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
        struct qat_crypto_instance *inst = ctx->inst;
        struct aead_request *areq = qat_req->aead_req;
        uint8_t stat_field = qat_resp->comn_resp.comn_status;
        int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

        qat_alg_free_bufl(inst, qat_req);
        if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
                res = -EBADMSG;
        areq->base.complete(&areq->base, res);
}

static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
                                        struct qat_crypto_request *qat_req)
{
        struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
        struct qat_crypto_instance *inst = ctx->inst;
        struct ablkcipher_request *areq = qat_req->ablkcipher_req;
        uint8_t stat_field = qat_resp->comn_resp.comn_status;
        int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

        qat_alg_free_bufl(inst, qat_req);
        if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
                res = -EINVAL;
        areq->base.complete(&areq->base, res);
}

void qat_alg_callback(void *resp)
{
        struct icp_qat_fw_la_resp *qat_resp = resp;
        struct qat_crypto_request *qat_req =
                        (void *)(__force long)qat_resp->opaque_data;

        qat_req->cb(qat_resp, qat_req);
}
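
/*
 * Submission path: map the scatterlists, copy the prebuilt session template
 * into the request, patch in per-request addresses, lengths and IV, then
 * push the message to the instance's symmetric TX ring, retrying briefly if
 * the ring is full.
 */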
static int qat_alg_aead_dec(struct aead_request *areq)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int digest_size = crypto_aead_authsize(aead_tfm);
        int ret, ctr = 0;
        u32 cipher_len;

        cipher_len = areq->cryptlen - digest_size;
        if (cipher_len % AES_BLOCK_SIZE != 0)
                return -EINVAL;

        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->dec_fw_req;
        qat_req->aead_ctx = ctx;
        qat_req->aead_req = areq;
        qat_req->cb = qat_aead_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = cipher_len;
        cipher_param->cipher_offset = areq->assoclen;
        memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
        auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
        auth_param->auth_off = 0;
        auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
        uint8_t *iv = areq->iv;
        int ret, ctr = 0;

        if (areq->cryptlen % AES_BLOCK_SIZE != 0)
                return -EINVAL;

        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->enc_fw_req;
        qat_req->aead_ctx = ctx;
        qat_req->aead_req = areq;
        qat_req->cb = qat_aead_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

        memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
        cipher_param->cipher_length = areq->cryptlen;
        cipher_param->cipher_offset = areq->assoclen;

        auth_param->auth_off = 0;
        auth_param->auth_len = areq->assoclen + areq->cryptlen;

        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
                                     const u8 *key, unsigned int keylen,
                                     int mode)
{
        struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        struct device *dev;

        spin_lock(&ctx->lock);
        if (ctx->enc_cd) {
                /* rekeying */
                dev = &GET_DEV(ctx->inst->accel_dev);
                memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
                memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
                memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
                memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
        } else {
                /* new key */
                int node = get_current_node();
                struct qat_crypto_instance *inst =
                                qat_crypto_get_instance_node(node);
                if (!inst) {
                        spin_unlock(&ctx->lock);
                        return -EINVAL;
                }

                dev = &GET_DEV(inst->accel_dev);
                ctx->inst = inst;
                ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
                                                  &ctx->enc_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->enc_cd) {
                        spin_unlock(&ctx->lock);
                        return -ENOMEM;
                }
                ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
                                                  &ctx->dec_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->dec_cd) {
                        spin_unlock(&ctx->lock);
                        goto out_free_enc;
                }
        }
        spin_unlock(&ctx->lock);
        if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
                goto out_free_all;

        return 0;

out_free_all:
        memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
        dma_free_coherent(dev, sizeof(*ctx->dec_cd),
                          ctx->dec_cd, ctx->dec_cd_paddr);
        ctx->dec_cd = NULL;
out_free_enc:
        memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
        dma_free_coherent(dev, sizeof(*ctx->enc_cd),
                          ctx->enc_cd, ctx->enc_cd_paddr);
        ctx->enc_cd = NULL;
        return -ENOMEM;
}

static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
                                         const u8 *key, unsigned int keylen)
{
        return qat_alg_ablkcipher_setkey(tfm, key, keylen,
                                         ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
                                         const u8 *key, unsigned int keylen)
{
        return qat_alg_ablkcipher_setkey(tfm, key, keylen,
                                         ICP_QAT_HW_CIPHER_CTR_MODE);
}

static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
                                         const u8 *key, unsigned int keylen)
{
        return qat_alg_ablkcipher_setkey(tfm, key, keylen,
                                         ICP_QAT_HW_CIPHER_XTS_MODE);
}

static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int ret, ctr = 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->enc_fw_req;
        qat_req->ablkcipher_ctx = ctx;
        qat_req->ablkcipher_req = req;
        qat_req->cb = qat_ablkcipher_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = req->nbytes;
        cipher_param->cipher_offset = 0;
        memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int ret, ctr = 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->dec_fw_req;
        qat_req->ablkcipher_ctx = ctx;
        qat_req->ablkcipher_req = req;
        qat_req->cb = qat_ablkcipher_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = req->nbytes;
        cipher_param->cipher_offset = 0;
        memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_aead_init(struct crypto_aead *tfm,
                             enum icp_qat_hw_auth_algo hash,
                             const char *hash_name)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

        ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
        if (IS_ERR(ctx->hash_tfm))
                return PTR_ERR(ctx->hash_tfm);
        ctx->qat_hash_alg = hash;
        crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
        return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev;

        crypto_free_shash(ctx->hash_tfm);

        if (!inst)
                return;

        dev = &GET_DEV(inst->accel_dev);
        if (ctx->enc_cd) {
                memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                                  ctx->enc_cd, ctx->enc_cd_paddr);
        }
        if (ctx->dec_cd) {
                memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                                  ctx->dec_cd, ctx->dec_cd_paddr);
        }
        qat_crypto_put_instance(inst);
}

static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        spin_lock_init(&ctx->lock);
        tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
        ctx->tfm = tfm;
        return 0;
}

static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
{
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev;

        if (!inst)
                return;

        dev = &GET_DEV(inst->accel_dev);
        if (ctx->enc_cd) {
                memset(ctx->enc_cd, 0,
                       sizeof(struct icp_qat_hw_cipher_algo_blk));
                dma_free_coherent(dev,
                                  sizeof(struct icp_qat_hw_cipher_algo_blk),
                                  ctx->enc_cd, ctx->enc_cd_paddr);
        }
        if (ctx->dec_cd) {
                memset(ctx->dec_cd, 0,
                       sizeof(struct icp_qat_hw_cipher_algo_blk));
                dma_free_coherent(dev,
                                  sizeof(struct icp_qat_hw_cipher_algo_blk),
                                  ctx->dec_cd, ctx->dec_cd_paddr);
        }
        qat_crypto_put_instance(inst);
}
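
/*
 * Algorithm tables. Priority 4001 ranks these above the kernel's software
 * AES implementations, so the hardware path is preferred whenever a QAT
 * device is present.
 */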
static struct aead_alg qat_aeads[] = { {
        .base = {
                .cra_name = "authenc(hmac(sha1),cbc(aes))",
                .cra_driver_name = "qat_aes_cbc_hmac_sha1",
                .cra_priority = 4001,
                .cra_flags = CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
                .cra_module = THIS_MODULE,
        },
        .init = qat_alg_aead_sha1_init,
        .exit = qat_alg_aead_exit,
        .setkey = qat_alg_aead_setkey,
        .decrypt = qat_alg_aead_dec,
        .encrypt = qat_alg_aead_enc,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA1_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha256),cbc(aes))",
                .cra_driver_name = "qat_aes_cbc_hmac_sha256",
                .cra_priority = 4001,
                .cra_flags = CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
                .cra_module = THIS_MODULE,
        },
        .init = qat_alg_aead_sha256_init,
        .exit = qat_alg_aead_exit,
        .setkey = qat_alg_aead_setkey,
        .decrypt = qat_alg_aead_dec,
        .encrypt = qat_alg_aead_enc,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA256_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha512),cbc(aes))",
                .cra_driver_name = "qat_aes_cbc_hmac_sha512",
                .cra_priority = 4001,
                .cra_flags = CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
                .cra_module = THIS_MODULE,
        },
        .init = qat_alg_aead_sha512_init,
        .exit = qat_alg_aead_exit,
        .setkey = qat_alg_aead_setkey,
        .decrypt = qat_alg_aead_dec,
        .encrypt = qat_alg_aead_enc,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA512_DIGEST_SIZE,
} };

static struct crypto_alg qat_algs[] = { {
        .cra_name = "cbc(aes)",
        .cra_driver_name = "qat_aes_cbc",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_ablkcipher_init,
        .cra_exit = qat_alg_ablkcipher_exit,
        .cra_u = {
                .ablkcipher = {
                        .setkey = qat_alg_ablkcipher_cbc_setkey,
                        .decrypt = qat_alg_ablkcipher_decrypt,
                        .encrypt = qat_alg_ablkcipher_encrypt,
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
}, {
        .cra_name = "ctr(aes)",
        .cra_driver_name = "qat_aes_ctr",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_ablkcipher_init,
        .cra_exit = qat_alg_ablkcipher_exit,
        .cra_u = {
                .ablkcipher = {
                        .setkey = qat_alg_ablkcipher_ctr_setkey,
                        .decrypt = qat_alg_ablkcipher_decrypt,
                        .encrypt = qat_alg_ablkcipher_encrypt,
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
}, {
        .cra_name = "xts(aes)",
        .cra_driver_name = "qat_aes_xts",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_ablkcipher_init,
        .cra_exit = qat_alg_ablkcipher_exit,
        .cra_u = {
                .ablkcipher = {
                        .setkey = qat_alg_ablkcipher_xts_setkey,
                        .decrypt = qat_alg_ablkcipher_decrypt,
                        .encrypt = qat_alg_ablkcipher_encrypt,
                        .min_keysize = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize = 2 * AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
} };
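
/*
 * Registration is reference counted across accelerator devices: only the
 * first device registers the algorithms with the crypto API, and only the
 * last one to go away unregisters them.
 */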
int qat_algs_register(void)
{
        int ret = 0, i;

        mutex_lock(&algs_lock);
        if (++active_devs != 1)
                goto unlock;

        for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
                qat_algs[i].cra_flags =
                        CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;

        ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
        if (ret)
                goto unlock;

        for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
                qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;

        ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
        if (ret)
                goto unreg_algs;

unlock:
        mutex_unlock(&algs_lock);
        return ret;

unreg_algs:
        crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
        goto unlock;
}

void qat_algs_unregister(void)
{
        mutex_lock(&algs_lock);
        if (--active_devs != 0)
                goto unlock;

        crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
        crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));

unlock:
        mutex_unlock(&algs_lock);
}