#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
char opad[SHA512_BLOCK_SIZE];
};
-struct qat_alg_ablkcipher_ctx {
+struct qat_alg_skcipher_ctx {
struct icp_qat_hw_cipher_algo_blk *enc_cd;
struct icp_qat_hw_cipher_algo_blk *dec_cd;
dma_addr_t enc_cd_paddr;
struct icp_qat_fw_la_bulk_req enc_fw_req;
struct icp_qat_fw_la_bulk_req dec_fw_req;
struct qat_crypto_instance *inst;
- struct crypto_tfm *tfm;
+ struct crypto_skcipher *tfm;
};
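/*
 * Per-tfm context for the converted algorithms: pre-built encrypt and
 * decrypt content descriptors (enc_cd/dec_cd, with their DMA handles) and
 * template firmware requests, so setkey does the heavy lifting once and
 * the hot path only patches per-request fields. The back-pointer is now a
 * crypto_skcipher rather than the bare crypto_tfm.
 */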
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
return 0;
}
-static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
- struct icp_qat_fw_la_bulk_req *req,
- struct icp_qat_hw_cipher_algo_blk *cd,
- const uint8_t *key, unsigned int keylen)
+static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
+ struct icp_qat_fw_la_bulk_req *req,
+ struct icp_qat_hw_cipher_algo_blk *cd,
+ const uint8_t *key, unsigned int keylen)
{
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}
-static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
- int alg, const uint8_t *key,
- unsigned int keylen, int mode)
+static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
+ int alg, const uint8_t *key,
+ unsigned int keylen, int mode)
{
struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
- qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
+ qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}
-static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
- int alg, const uint8_t *key,
- unsigned int keylen, int mode)
+static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
+ int alg, const uint8_t *key,
+ unsigned int keylen, int mode)
{
struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
- qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
+ qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
	dec_cd->aes.cipher_config.val =
				QAT_AES_HW_CONFIG_DEC(alg, mode);
else
	dec_cd->aes.cipher_config.val =
				QAT_AES_HW_CONFIG_ENC(alg, mode);
}
-static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
- const uint8_t *key,
- unsigned int keylen,
- int mode)
+static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
+ const uint8_t *key,
+ unsigned int keylen,
+ int mode)
{
int alg;
if (qat_alg_validate_key(keylen, &alg, mode))
goto bad_key;
- qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
- qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
+ qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
+ qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
return 0;
bad_key:
- crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
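/*
 * Key-validation failures follow the standard contract of this API era:
 * flag the tfm with CRYPTO_TFM_RES_BAD_KEY_LEN and return -EINVAL, now
 * via the skcipher-typed crypto_skcipher_set_flags() helper.
 */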
areq->base.complete(&areq->base, res);
}
-static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
- struct qat_crypto_request *qat_req)
+static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+ struct qat_crypto_request *qat_req)
{
- struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
+ struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
struct qat_crypto_instance *inst = ctx->inst;
- struct ablkcipher_request *areq = qat_req->ablkcipher_req;
+ struct skcipher_request *sreq = qat_req->skcipher_req;
uint8_t stat_field = qat_resp->comn_resp.comn_status;
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
res = -EINVAL;
- memcpy(areq->info, qat_req->iv, AES_BLOCK_SIZE);
+ memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
qat_req->iv_paddr);
- areq->base.complete(&areq->base, res);
+ sreq->base.complete(&sreq->base, res);
}
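/*
 * Usage sketch (editorial, not part of this patch): how a kernel client
 * drives the converted algorithm through the skcipher API. The callback
 * above copies the hardware-updated IV back into sreq->iv, which is what
 * lets chained requests like this one work. The "cbc(aes)" selector, key,
 * and buffer contents are illustrative assumptions.
 */
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int qat_skcipher_usage_demo(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 key[AES_KEYSIZE_128] = { 0 };	/* demo key only */
	u8 iv[AES_BLOCK_SIZE] = { 0 };
	u8 *buf;
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);	/* DMA-safe buffer */
	if (!req || !buf) {
		ret = -ENOMEM;
		goto out_free_req;
	}

	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	/* Submission is async; crypto_wait_req() folds -EINPROGRESS away. */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

out_free_req:
	kfree(buf);
	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}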
void qat_alg_callback(void *resp)
return -EINPROGRESS;
}
-static int qat_alg_ablkcipher_rekey(struct qat_alg_ablkcipher_ctx *ctx,
- const u8 *key, unsigned int keylen,
- int mode)
+static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
+ const u8 *key, unsigned int keylen,
+ int mode)
{
memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
- return qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
+ return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
}
-static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx,
- const u8 *key, unsigned int keylen,
- int mode)
+static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
+ const u8 *key, unsigned int keylen,
+ int mode)
{
struct qat_crypto_instance *inst = NULL;
struct device *dev;
goto out_free_enc;
}
- ret = qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
+ ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
if (ret)
goto out_free_all;
return ret;
}
-static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen,
- int mode)
+static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen,
+ int mode)
{
- struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
if (ctx->enc_cd)
- return qat_alg_ablkcipher_rekey(ctx, key, keylen, mode);
+ return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
else
- return qat_alg_ablkcipher_newkey(ctx, key, keylen, mode);
+ return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
}
-static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen)
+static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
{
- return qat_alg_ablkcipher_setkey(tfm, key, keylen,
- ICP_QAT_HW_CIPHER_CBC_MODE);
+ return qat_alg_skcipher_setkey(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_CBC_MODE);
}
-static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen)
+static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
{
- return qat_alg_ablkcipher_setkey(tfm, key, keylen,
- ICP_QAT_HW_CIPHER_CTR_MODE);
+ return qat_alg_skcipher_setkey(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_CTR_MODE);
}
-static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen)
+static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
{
- return qat_alg_ablkcipher_setkey(tfm, key, keylen,
- ICP_QAT_HW_CIPHER_XTS_MODE);
+ return qat_alg_skcipher_setkey(tfm, key, keylen,
+ ICP_QAT_HW_CIPHER_XTS_MODE);
}
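/*
 * XTS consumes two AES keys back to back (data key plus tweak key), which
 * is why the "xts(aes)" entry in the table below advertises
 * 2 * AES_MIN_KEY_SIZE and 2 * AES_MAX_KEY_SIZE.
 */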
-static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
+static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_bulk_req *msg;
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int ret, ctr = 0;
- if (req->nbytes == 0)
+ if (req->cryptlen == 0)
return 0;
qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
msg = &qat_req->req;
*msg = ctx->enc_fw_req;
- qat_req->ablkcipher_ctx = ctx;
- qat_req->ablkcipher_req = req;
- qat_req->cb = qat_ablkcipher_alg_callback;
+ qat_req->skcipher_ctx = ctx;
+ qat_req->skcipher_req = req;
+ qat_req->cb = qat_skcipher_alg_callback;
qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
- cipher_param->cipher_length = req->nbytes;
+ cipher_param->cipher_length = req->cryptlen;
cipher_param->cipher_offset = 0;
cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
- memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
return -EINPROGRESS;
}
-static int qat_alg_ablkcipher_blk_encrypt(struct ablkcipher_request *req)
+static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
{
- if (req->nbytes % AES_BLOCK_SIZE != 0)
+ if (req->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL;
- return qat_alg_ablkcipher_encrypt(req);
+ return qat_alg_skcipher_encrypt(req);
}
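/*
 * The blk_ wrappers back the CBC and XTS entries, which only accept whole
 * AES blocks; CTR is a stream mode, so its table entries point straight
 * at qat_alg_skcipher_encrypt()/_decrypt() with no length check.
 */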
-static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
+static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
{
- struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
+ struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_bulk_req *msg;
struct device *dev = &GET_DEV(ctx->inst->accel_dev);
int ret, ctr = 0;
- if (req->nbytes == 0)
+ if (req->cryptlen == 0)
return 0;
qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
msg = &qat_req->req;
*msg = ctx->dec_fw_req;
- qat_req->ablkcipher_ctx = ctx;
- qat_req->ablkcipher_req = req;
- qat_req->cb = qat_ablkcipher_alg_callback;
+ qat_req->skcipher_ctx = ctx;
+ qat_req->skcipher_req = req;
+ qat_req->cb = qat_skcipher_alg_callback;
qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
- cipher_param->cipher_length = req->nbytes;
+ cipher_param->cipher_length = req->cryptlen;
cipher_param->cipher_offset = 0;
cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
- memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
+ memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
do {
ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
} while (ret == -EAGAIN && ctr++ < 10);
return -EINPROGRESS;
}
-static int qat_alg_ablkcipher_blk_decrypt(struct ablkcipher_request *req)
+static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
{
- if (req->nbytes % AES_BLOCK_SIZE != 0)
+ if (req->cryptlen % AES_BLOCK_SIZE != 0)
return -EINVAL;
- return qat_alg_ablkcipher_decrypt(req);
+ return qat_alg_skcipher_decrypt(req);
}
static int qat_alg_aead_init(struct crypto_aead *tfm,
enum icp_qat_hw_auth_algo hash,
qat_crypto_put_instance(inst);
}
-static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
+static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
ctx->tfm = tfm;
return 0;
}
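/*
 * crypto_skcipher_set_reqsize() replaces the old direct write to
 * tfm->crt_ablkcipher.reqsize: it tells the core how much per-request
 * driver context to reserve behind each skcipher_request, so that
 * skcipher_request_ctx(req) in the encrypt/decrypt paths yields this
 * driver's struct qat_crypto_request.
 */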
-static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
+static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
- struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev;
.maxauthsize = SHA512_DIGEST_SIZE,
} };
-static struct crypto_alg qat_algs[] = { {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "qat_aes_cbc",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = qat_alg_ablkcipher_init,
- .cra_exit = qat_alg_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .setkey = qat_alg_ablkcipher_cbc_setkey,
- .decrypt = qat_alg_ablkcipher_blk_decrypt,
- .encrypt = qat_alg_ablkcipher_blk_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- },
+static struct skcipher_alg qat_skciphers[] = { {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "qat_aes_cbc",
+ .base.cra_priority = 4001,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = qat_alg_skcipher_init_tfm,
+ .exit = qat_alg_skcipher_exit_tfm,
+ .setkey = qat_alg_skcipher_cbc_setkey,
+ .decrypt = qat_alg_skcipher_blk_decrypt,
+ .encrypt = qat_alg_skcipher_blk_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
}, {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "qat_aes_ctr",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = qat_alg_ablkcipher_init,
- .cra_exit = qat_alg_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .setkey = qat_alg_ablkcipher_ctr_setkey,
- .decrypt = qat_alg_ablkcipher_decrypt,
- .encrypt = qat_alg_ablkcipher_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- },
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "qat_aes_ctr",
+ .base.cra_priority = 4001,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = qat_alg_skcipher_init_tfm,
+ .exit = qat_alg_skcipher_exit_tfm,
+ .setkey = qat_alg_skcipher_ctr_setkey,
+ .decrypt = qat_alg_skcipher_decrypt,
+ .encrypt = qat_alg_skcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
}, {
- .cra_name = "xts(aes)",
- .cra_driver_name = "qat_aes_xts",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = qat_alg_ablkcipher_init,
- .cra_exit = qat_alg_ablkcipher_exit,
- .cra_u = {
- .ablkcipher = {
- .setkey = qat_alg_ablkcipher_xts_setkey,
- .decrypt = qat_alg_ablkcipher_blk_decrypt,
- .encrypt = qat_alg_ablkcipher_blk_encrypt,
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- },
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "qat_aes_xts",
+ .base.cra_priority = 4001,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = qat_alg_skcipher_init_tfm,
+ .exit = qat_alg_skcipher_exit_tfm,
+ .setkey = qat_alg_skcipher_xts_setkey,
+ .decrypt = qat_alg_skcipher_blk_decrypt,
+ .encrypt = qat_alg_skcipher_blk_encrypt,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
} };
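/*
 * Note (editorial): either name resolves these instances. "cbc(aes)",
 * "ctr(aes)" and "xts(aes)" go through priority-based selection, where
 * cra_priority 4001 outranks typical software implementations, while the
 * cra_driver_name ("qat_aes_cbc", ...) can be passed to
 * crypto_alloc_skcipher() to pin this driver explicitly.
 */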
int qat_algs_register(void)
{
- int ret = 0, i;
+ int ret = 0;
mutex_lock(&algs_lock);
if (++active_devs != 1)
goto unlock;
- for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
- qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
-
- ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ ret = crypto_register_skciphers(qat_skciphers,
+ ARRAY_SIZE(qat_skciphers));
if (ret)
goto unlock;
- for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
- qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
-
ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
if (ret)
goto unreg_algs;
return ret;
unreg_algs:
- crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
goto unlock;
}
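/*
 * Registration is refcounted across accelerator devices via active_devs:
 * only the first device up registers the skcipher and AEAD instances, and
 * only the last device down (in qat_algs_unregister() below) removes them.
 */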
goto unlock;
crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
- crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
unlock:
mutex_unlock(&algs_lock);