Function updates to the uadk framework: add heterogeneous hybrid computing acceleration support for all uadk algorithms.
Longfang Liu (14):
  Revert "drivers alloc and free resources by themself"
  uadk: add heterogeneous scheduling solutions
  uadk: update uadk scheduler
  uadk: update acc user mode driver
  uadk: aead adapts to heterogeneous scheduling mode
  uadk: update hash agg’s function code
  uadk: update cipher’s function code
  uadk: comp adapt to heterogeneous scheduling mode
  uadk: DH adapt to heterogeneous scheduling mode
  uadk: digest adapt to heterogeneous scheduling mode
  uadk: ECC adapt to heterogeneous scheduling mode
  uadk: rsa adapt to heterogeneous scheduling mode
  uadk: update uadk's internal functions
  uadk_tool: update uadk test tool
 Makefile.am                              |   2 +-
 drv/hash_mb/hash_mb.c                    |  48 +-
 drv/hisi_comp.c                          |  32 +-
 drv/hisi_dae.c                           |  39 +-
 drv/hisi_hpre.c                          |  63 +-
 drv/hisi_sec.c                           | 120 ++-
 drv/isa_ce_sm3.c                         |  55 +-
 drv/isa_ce_sm4.c                         |  59 +-
 include/wd_alg.h                         |  58 +-
 include/wd_alg_common.h                  |  58 +-
 include/wd_sched.h                       |   7 +
 include/wd_util.h                        |  35 +-
 uadk_tool/benchmark/sec_uadk_benchmark.c |  71 +-
 wd_aead.c                                | 126 ++-
 wd_agg.c                                 |  81 +-
 wd_alg.c                                 |  63 +-
 wd_cipher.c                              | 103 ++-
 wd_comp.c                                |  92 ++-
 wd_dh.c                                  |  95 ++-
 wd_digest.c                              | 125 ++-
 wd_ecc.c                                 |  97 ++-
 wd_rsa.c                                 |  95 ++-
 wd_sched.c                               | 974 +++++++++++++++++++++--
 wd_util.c                                | 798 ++++++++++++-------
 24 files changed, 2205 insertions(+), 1091 deletions(-)
This reverts commit 3fc344aa4f7c460269cd0d870fe388f01dfa22a2.
---
 drv/hash_mb/hash_mb.c |  48 +++++++----------
 drv/hisi_comp.c       |  29 ++++-------
 drv/hisi_dae.c        |  36 ++++++-------
 drv/hisi_hpre.c       |  60 ++++++++--------------
 drv/hisi_sec.c        | 117 ++++++++++++++++++++----------------------
 drv/isa_ce_sm3.c      |  37 +++++--------
 drv/isa_ce_sm4.c      |  37 +++++--------
 include/wd_alg.h      |  35 +++----------
 include/wd_util.h     |  14 ++---
 wd_aead.c             |  37 +++++--------
 wd_agg.c              |  13 ++---
 wd_cipher.c           |  20 ++++----
 wd_comp.c             |  20 ++++----
 wd_dh.c               |  22 ++++----
 wd_digest.c           |  37 ++++++-------
 wd_ecc.c              |  22 ++++----
 wd_rsa.c              |  22 ++++----
 wd_util.c             |  35 ++++++++++---
 18 files changed, 281 insertions(+), 360 deletions(-)
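The revert below removes the struct wd_alg_driver *drv argument from the driver callbacks and brings back the priv_size field, so the framework allocates the driver's private context and hands it in through init(). The following is a minimal sketch of a driver written against that reverted interface; struct my_drv_ctx, the callback bodies and the driver/algorithm names are illustrative, while the signatures and fields match the hunks below.

#include <string.h>
#include "wd_alg.h"
#include "wd_util.h"

/* Illustrative private context; the framework allocates priv_size bytes of it. */
struct my_drv_ctx {
	struct wd_ctx_config_internal config;
};

static int my_drv_init(void *conf, void *priv)
{
	struct wd_ctx_config_internal *config = conf;
	struct my_drv_ctx *my_ctx = priv;

	/* Fallback init is NULL, as in the CE drivers below */
	if (!conf || !priv)
		return 0;

	memcpy(&my_ctx->config, config, sizeof(struct wd_ctx_config_internal));
	return 0;
}

static void my_drv_exit(void *priv)
{
	/* Nothing to free here: the framework owns and releases the priv buffer. */
}

static int my_drv_send(handle_t ctx, void *drv_msg) { return 0; }
static int my_drv_recv(handle_t ctx, void *drv_msg) { return 0; }

static struct wd_alg_driver my_driver = {
	.drv_name    = "my_drv",
	.alg_name    = "sm3",
	.calc_type   = UADK_ALG_CE_INSTR,
	.priority    = 200,
	.priv_size   = sizeof(struct my_drv_ctx), /* size the framework calloc()s */
	.queue_num   = 1,
	.op_type_num = 1,
	.fallback    = 0,
	.init        = my_drv_init,
	.exit        = my_drv_exit,
	.send        = my_drv_send,
	.recv        = my_drv_recv,
};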
diff --git a/drv/hash_mb/hash_mb.c b/drv/hash_mb/hash_mb.c index f0f27b5..e9ef69f 100644 --- a/drv/hash_mb/hash_mb.c +++ b/drv/hash_mb/hash_mb.c @@ -186,52 +186,41 @@ free_mb_queue: return ret; }
-static int hash_mb_init(struct wd_alg_driver *drv, void *conf) +static int hash_mb_init(void *conf, void *priv) { struct wd_ctx_config_internal *config = conf; - struct hash_mb_ctx *priv; - int ret; + struct hash_mb_ctx *mb_ctx = priv;
/* Fallback init is NULL */ - if (!drv || !conf) + if (!conf || !priv) return 0;
- priv = malloc(sizeof(struct hash_mb_ctx)); - if (!priv) - return -WD_ENOMEM; - /* multibuff does not use epoll. */ config->epoll_en = 0; - memcpy(&priv->config, config, sizeof(struct wd_ctx_config_internal)); - - ret = hash_mb_queue_init(config); - if (ret) { - free(priv); - return ret; - } + memcpy(&mb_ctx->config, config, sizeof(struct wd_ctx_config_internal));
- drv->priv = priv; - - return WD_SUCCESS; + return hash_mb_queue_init(config); }
-static void hash_mb_exit(struct wd_alg_driver *drv) +static void hash_mb_exit(void *priv) { - if(!drv || !drv->priv) - return; + struct hash_mb_ctx *mb_ctx = priv; + struct wd_ctx_config_internal *config;
- struct hash_mb_ctx *priv = (struct hash_mb_ctx *)drv->priv; + if (!priv) { + WD_ERR("invalid: input parameter is NULL!\n"); + return; + }
- hash_mb_queue_uninit(&priv->config, priv->config.ctx_num); - free(priv); - drv->priv = NULL; + config = &mb_ctx->config; + hash_mb_queue_uninit(config, config->ctx_num); }
static void hash_mb_pad_data(struct hash_pad *hash_pad, __u8 *in, __u32 partial, __u64 total_len, bool transfer) { - __u64 size = total_len << BYTES_TO_BITS_OFFSET; __u8 *buffer = hash_pad->pad; + __u64 size = total_len << 3;
if (partial) memcpy(buffer, in, partial); @@ -266,7 +255,7 @@ static inline void hash_xor(__u8 *key_out, __u8 *key_in, __u32 key_len, __u8 xor if (i < key_len) key_out[i] = key_in[i] ^ xor_value; else - key_out[i] = xor_value; + key_out[i] = 0x0 ^ xor_value; } }
@@ -554,7 +543,7 @@ static int hash_mb_check_param(struct hash_mb_queue *mb_queue, struct wd_digest_ return WD_SUCCESS; }
-static int hash_mb_send(struct wd_alg_driver *drv, handle_t ctx, void *drv_msg) +static int hash_mb_send(handle_t ctx, void *drv_msg) { struct wd_soft_ctx *s_ctx = (struct wd_soft_ctx *)ctx; struct hash_mb_queue *mb_queue = s_ctx->priv; @@ -775,7 +764,7 @@ static int hash_mb_do_jobs(struct hash_mb_queue *mb_queue) return WD_SUCCESS; }
-static int hash_mb_recv(struct wd_alg_driver *drv, handle_t ctx, void *drv_msg) +static int hash_mb_recv(handle_t ctx, void *drv_msg) { struct wd_soft_ctx *s_ctx = (struct wd_soft_ctx *)ctx; struct hash_mb_queue *mb_queue = s_ctx->priv; @@ -809,6 +798,7 @@ static int hash_mb_get_usage(void *param) .alg_name = (hash_alg_name),\ .calc_type = UADK_ALG_SVE_INSTR,\ .priority = 100,\ + .priv_size = sizeof(struct hash_mb_ctx),\ .queue_num = 1,\ .op_type_num = 1,\ .fallback = 0,\ diff --git a/drv/hisi_comp.c b/drv/hisi_comp.c index 71e859f..547e665 100644 --- a/drv/hisi_comp.c +++ b/drv/hisi_comp.c @@ -790,11 +790,11 @@ static void hisi_zip_sqe_ops_adapt(handle_t h_qp) } }
-static int hisi_zip_init(struct wd_alg_driver *drv, void *conf) +static int hisi_zip_init(void *conf, void *priv) { struct wd_ctx_config_internal *config = conf; + struct hisi_zip_ctx *zip_ctx = (struct hisi_zip_ctx *)priv; struct hisi_qm_priv qm_priv; - struct hisi_zip_ctx *priv; handle_t h_qp = 0; handle_t h_ctx; __u32 i, j; @@ -804,11 +804,7 @@ static int hisi_zip_init(struct wd_alg_driver *drv, void *conf) return -WD_EINVAL; }
- priv = malloc(sizeof(struct hisi_zip_ctx)); - if (!priv) - return -WD_EINVAL; - - memcpy(&priv->config, config, sizeof(struct wd_ctx_config_internal)); + memcpy(&zip_ctx->config, config, sizeof(struct wd_ctx_config_internal)); /* allocate qp for each context */ for (i = 0; i < config->ctx_num; i++) { h_ctx = config->ctxs[i].ctx; @@ -826,7 +822,6 @@ static int hisi_zip_init(struct wd_alg_driver *drv, void *conf) }
hisi_zip_sqe_ops_adapt(h_qp); - drv->priv = priv;
return 0; out: @@ -834,27 +829,20 @@ out: h_qp = (handle_t)wd_ctx_get_priv(config->ctxs[j].ctx); hisi_qm_free_qp(h_qp); } - free(priv); return -WD_EINVAL; }
-static void hisi_zip_exit(struct wd_alg_driver *drv) +static void hisi_zip_exit(void *priv) { - if(!drv || !drv->priv) - return; - - struct hisi_zip_ctx *priv = (struct hisi_zip_ctx *)drv->priv; - struct wd_ctx_config_internal *config; + struct hisi_zip_ctx *zip_ctx = (struct hisi_zip_ctx *)priv; + struct wd_ctx_config_internal *config = &zip_ctx->config; handle_t h_qp; __u32 i;
- config = &priv->config; for (i = 0; i < config->ctx_num; i++) { h_qp = (handle_t)wd_ctx_get_priv(config->ctxs[i].ctx); hisi_qm_free_qp(h_qp); } - free(priv); - drv->priv = NULL; }
static int fill_zip_comp_sqe(struct hisi_qp *qp, struct wd_comp_msg *msg, @@ -934,7 +922,7 @@ static void free_hw_sgl(handle_t h_qp, struct hisi_zip_sqe *sqe, } }
-static int hisi_zip_comp_send(struct wd_alg_driver *drv, handle_t ctx, void *comp_msg) +static int hisi_zip_comp_send(handle_t ctx, void *comp_msg) { struct hisi_qp *qp = wd_ctx_get_priv(ctx); struct wd_comp_msg *msg = comp_msg; @@ -1078,7 +1066,7 @@ static int parse_zip_sqe(struct hisi_qp *qp, struct hisi_zip_sqe *sqe, return 0; }
-static int hisi_zip_comp_recv(struct wd_alg_driver *drv, handle_t ctx, void *comp_msg) +static int hisi_zip_comp_recv(handle_t ctx, void *comp_msg) { struct hisi_qp *qp = wd_ctx_get_priv(ctx); struct wd_comp_msg *recv_msg = comp_msg; @@ -1100,6 +1088,7 @@ static int hisi_zip_comp_recv(struct wd_alg_driver *drv, handle_t ctx, void *com .alg_name = (zip_alg_name),\ .calc_type = UADK_ALG_HW,\ .priority = 100,\ + .priv_size = sizeof(struct hisi_zip_ctx),\ .queue_num = ZIP_CTX_Q_NUM_DEF,\ .op_type_num = 2,\ .fallback = 0,\ diff --git a/drv/hisi_dae.c b/drv/hisi_dae.c index b9f6ee0..49d6b55 100644 --- a/drv/hisi_dae.c +++ b/drv/hisi_dae.c @@ -524,7 +524,7 @@ static int check_hashagg_param(struct wd_agg_msg *msg) return WD_SUCCESS; }
-static int hashagg_send(struct wd_alg_driver *drv, handle_t ctx, void *hashagg_msg) +static int hashagg_send(handle_t ctx, void *hashagg_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct hisi_qp *qp = (struct hisi_qp *)h_qp; @@ -657,7 +657,7 @@ static void fill_hashagg_msg_task_err(struct dae_sqe *sqe, struct wd_agg_msg *ms } }
-static int hashagg_recv(struct wd_alg_driver *drv, handle_t ctx, void *hashagg_msg) +static int hashagg_recv(handle_t ctx, void *hashagg_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct hisi_qp *qp = (struct hisi_qp *)h_qp; @@ -1554,11 +1554,11 @@ update_table: return ret; }
-static int dae_init(struct wd_alg_driver *drv, void *conf) +static int dae_init(void *conf, void *priv) { struct wd_ctx_config_internal *config = conf; + struct hisi_dae_ctx *dae_ctx = priv; struct hisi_qm_priv qm_priv; - struct hisi_dae_ctx *priv; handle_t h_qp = 0; handle_t h_ctx; __u32 i, j; @@ -1569,10 +1569,6 @@ static int dae_init(struct wd_alg_driver *drv, void *conf) return -WD_EINVAL; }
- priv = malloc(sizeof(struct hisi_dae_ctx)); - if (!priv) - return -WD_ENOMEM; - qm_priv.op_type = DAE_HASH_AGG_TYPE; qm_priv.sqe_size = sizeof(struct dae_sqe); /* Allocate qp for each context */ @@ -1593,10 +1589,9 @@ static int dae_init(struct wd_alg_driver *drv, void *conf) if (ret) goto free_h_qp; } - memcpy(&priv->config, config, sizeof(struct wd_ctx_config_internal)); - drv->priv = priv; + memcpy(&dae_ctx->config, config, sizeof(struct wd_ctx_config_internal));
- return WD_SUCCESS; + return 0;
free_h_qp: hisi_qm_free_qp(h_qp); @@ -1606,29 +1601,27 @@ out: dae_uninit_qp_priv(h_qp); hisi_qm_free_qp(h_qp); } - free(priv); return ret; }
-static void dae_exit(struct wd_alg_driver *drv) +static void dae_exit(void *priv) { - if(!drv || !drv->priv) - return; - - struct hisi_dae_ctx *priv = (struct hisi_dae_ctx *)drv->priv; + struct hisi_dae_ctx *dae_ctx = priv; struct wd_ctx_config_internal *config; handle_t h_qp; __u32 i;
- config = &priv->config; + if (!priv) { + WD_ERR("invalid: input parameter is NULL!\n"); + return; + } + + config = &dae_ctx->config; for (i = 0; i < config->ctx_num; i++) { h_qp = (handle_t)wd_ctx_get_priv(config->ctxs[i].ctx); dae_uninit_qp_priv(h_qp); hisi_qm_free_qp(h_qp); } - - free(priv); - drv->priv = NULL; }
static int dae_get_usage(void *param) @@ -1656,6 +1649,7 @@ static struct wd_alg_driver hashagg_driver = { .alg_name = "hashagg", .calc_type = UADK_ALG_HW, .priority = 100, + .priv_size = sizeof(struct hisi_dae_ctx), .queue_num = DAE_CTX_Q_NUM_DEF, .op_type_num = 1, .fallback = 0, diff --git a/drv/hisi_hpre.c b/drv/hisi_hpre.c index 7c652d1..f91d7a8 100644 --- a/drv/hisi_hpre.c +++ b/drv/hisi_hpre.c @@ -525,11 +525,11 @@ out: return -WD_EINVAL; }
-static int hpre_rsa_dh_init(struct wd_alg_driver *drv, void *conf) +static int hpre_rsa_dh_init(void *conf, void *priv) { struct wd_ctx_config_internal *config = (struct wd_ctx_config_internal *)conf; + struct hisi_hpre_ctx *hpre_ctx = (struct hisi_hpre_ctx *)priv; struct hisi_qm_priv qm_priv; - struct hisi_hpre_ctx *priv; int ret;
if (!config->ctx_num) { @@ -537,27 +537,19 @@ static int hpre_rsa_dh_init(struct wd_alg_driver *drv, void *conf) return -WD_EINVAL; }
- priv = malloc(sizeof(struct hisi_hpre_ctx)); - if (!priv) - return -WD_EINVAL; - qm_priv.op_type = HPRE_HW_V2_ALG_TYPE; - ret = hpre_init_qm_priv(config, priv, &qm_priv); - if (ret) { - free(priv); + ret = hpre_init_qm_priv(config, hpre_ctx, &qm_priv); + if (ret) return ret; - } - - drv->priv = priv;
return WD_SUCCESS; }
-static int hpre_ecc_init(struct wd_alg_driver *drv, void *conf) +static int hpre_ecc_init(void *conf, void *priv) { struct wd_ctx_config_internal *config = (struct wd_ctx_config_internal *)conf; + struct hisi_hpre_ctx *hpre_ctx = (struct hisi_hpre_ctx *)priv; struct hisi_qm_priv qm_priv; - struct hisi_hpre_ctx *priv; int ret;
if (!config->ctx_num) { @@ -565,43 +557,28 @@ static int hpre_ecc_init(struct wd_alg_driver *drv, void *conf) return -WD_EINVAL; }
- priv = malloc(sizeof(struct hisi_hpre_ctx)); - if (!priv) - return -WD_EINVAL; - qm_priv.op_type = HPRE_HW_V3_ECC_ALG_TYPE; - ret = hpre_init_qm_priv(config, priv, &qm_priv); - if (ret) { - free(priv); + ret = hpre_init_qm_priv(config, hpre_ctx, &qm_priv); + if (ret) return ret; - } - - drv->priv = priv;
return WD_SUCCESS; }
-static void hpre_exit(struct wd_alg_driver *drv) +static void hpre_exit(void *priv) { - if(!drv || !drv->priv) - return; - - struct hisi_hpre_ctx *priv = (struct hisi_hpre_ctx *)drv->priv; - struct wd_ctx_config_internal *config; + struct hisi_hpre_ctx *hpre_ctx = (struct hisi_hpre_ctx *)priv; + struct wd_ctx_config_internal *config = &hpre_ctx->config; handle_t h_qp; __u32 i;
- config = &priv->config; for (i = 0; i < config->ctx_num; i++) { h_qp = (handle_t)wd_ctx_get_priv(config->ctxs[i].ctx); hisi_qm_free_qp(h_qp); } - - free(priv); - drv->priv = NULL; }
-static int rsa_send(struct wd_alg_driver *drv, handle_t ctx, void *rsa_msg) +static int rsa_send(handle_t ctx, void *rsa_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct wd_rsa_msg *msg = rsa_msg; @@ -657,7 +634,7 @@ static void hpre_result_check(struct hisi_hpre_sqe *hw_msg, } }
-static int rsa_recv(struct wd_alg_driver *drv, handle_t ctx, void *rsa_msg) +static int rsa_recv(handle_t ctx, void *rsa_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct hisi_qp *qp = (struct hisi_qp *)h_qp; @@ -755,7 +732,7 @@ static int dh_out_transfer(struct wd_dh_msg *msg, return WD_SUCCESS; }
-static int dh_send(struct wd_alg_driver *drv, handle_t ctx, void *dh_msg) +static int dh_send(handle_t ctx, void *dh_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct wd_dh_msg *msg = dh_msg; @@ -800,7 +777,7 @@ static int dh_send(struct wd_alg_driver *drv, handle_t ctx, void *dh_msg) return hisi_qm_send(h_qp, &hw_msg, 1, &send_cnt); }
-static int dh_recv(struct wd_alg_driver *drv, handle_t ctx, void *dh_msg) +static int dh_recv(handle_t ctx, void *dh_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct hisi_qp *qp = (struct hisi_qp *)h_qp; @@ -1889,7 +1866,7 @@ free_dst: return ret; }
-static int ecc_send(struct wd_alg_driver *drv, handle_t ctx, void *ecc_msg) +static int ecc_send(handle_t ctx, void *ecc_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct wd_ecc_msg *msg = ecc_msg; @@ -2460,7 +2437,7 @@ fail: return ret; }
-static int ecc_recv(struct wd_alg_driver *drv, handle_t ctx, void *ecc_msg) +static int ecc_recv(handle_t ctx, void *ecc_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct wd_ecc_msg *msg = ecc_msg; @@ -2497,6 +2474,7 @@ static int hpre_get_usage(void *param) .alg_name = (hpre_alg_name),\ .calc_type = UADK_ALG_HW,\ .priority = 100,\ + .priv_size = sizeof(struct hisi_hpre_ctx),\ .queue_num = HPRE_CTX_Q_NUM_DEF,\ .op_type_num = 1,\ .fallback = 0,\ @@ -2520,6 +2498,7 @@ static struct wd_alg_driver hpre_rsa_driver = { .alg_name = "rsa", .calc_type = UADK_ALG_HW, .priority = 100, + .priv_size = sizeof(struct hisi_hpre_ctx), .queue_num = HPRE_CTX_Q_NUM_DEF, .op_type_num = 1, .fallback = 0, @@ -2535,6 +2514,7 @@ static struct wd_alg_driver hpre_dh_driver = { .alg_name = "dh", .calc_type = UADK_ALG_HW, .priority = 100, + .priv_size = sizeof(struct hisi_hpre_ctx), .queue_num = HPRE_CTX_Q_NUM_DEF, .op_type_num = 1, .fallback = 0, diff --git a/drv/hisi_sec.c b/drv/hisi_sec.c index 9ad2eb2..7635466 100644 --- a/drv/hisi_sec.c +++ b/drv/hisi_sec.c @@ -523,76 +523,76 @@ static __u32 g_sec_hmac_full_len[WD_DIGEST_TYPE_MAX] = { SEC_HMAC_SHA512_MAC_LEN, SEC_HMAC_SHA512_224_MAC_LEN, SEC_HMAC_SHA512_256_MAC_LEN };
-static int hisi_sec_init(struct wd_alg_driver *drv, void *conf); -static void hisi_sec_exit(struct wd_alg_driver *drv); +static int hisi_sec_init(void *conf, void *priv); +static void hisi_sec_exit(void *priv);
-static int hisi_sec_cipher_send(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg); -static int hisi_sec_cipher_recv(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg); -static int hisi_sec_cipher_send_v3(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg); -static int hisi_sec_cipher_recv_v3(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg); +static int hisi_sec_cipher_send(handle_t ctx, void *wd_msg); +static int hisi_sec_cipher_recv(handle_t ctx, void *wd_msg); +static int hisi_sec_cipher_send_v3(handle_t ctx, void *wd_msg); +static int hisi_sec_cipher_recv_v3(handle_t ctx, void *wd_msg);
-static int hisi_sec_digest_send(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg); -static int hisi_sec_digest_recv(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg); -static int hisi_sec_digest_send_v3(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg); -static int hisi_sec_digest_recv_v3(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg); +static int hisi_sec_digest_send(handle_t ctx, void *wd_msg); +static int hisi_sec_digest_recv(handle_t ctx, void *wd_msg); +static int hisi_sec_digest_send_v3(handle_t ctx, void *wd_msg); +static int hisi_sec_digest_recv_v3(handle_t ctx, void *wd_msg);
-static int hisi_sec_aead_send(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg); -static int hisi_sec_aead_recv(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg); -static int hisi_sec_aead_send_v3(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg); -static int hisi_sec_aead_recv_v3(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg); +static int hisi_sec_aead_send(handle_t ctx, void *wd_msg); +static int hisi_sec_aead_recv(handle_t ctx, void *wd_msg); +static int hisi_sec_aead_send_v3(handle_t ctx, void *wd_msg); +static int hisi_sec_aead_recv_v3(handle_t ctx, void *wd_msg);
-static int cipher_send(struct wd_alg_driver *drv, handle_t ctx, void *msg) +static int cipher_send(handle_t ctx, void *msg) { struct hisi_qp *qp = (struct hisi_qp *)wd_ctx_get_priv(ctx);
if (qp->q_info.hw_type == HISI_QM_API_VER2_BASE) - return hisi_sec_cipher_send(drv, ctx, msg); - return hisi_sec_cipher_send_v3(drv, ctx, msg); + return hisi_sec_cipher_send(ctx, msg); + return hisi_sec_cipher_send_v3(ctx, msg); }
-static int cipher_recv(struct wd_alg_driver *drv, handle_t ctx, void *msg) +static int cipher_recv(handle_t ctx, void *msg) { struct hisi_qp *qp = (struct hisi_qp *)wd_ctx_get_priv(ctx);
if (qp->q_info.hw_type == HISI_QM_API_VER2_BASE) - return hisi_sec_cipher_recv(drv, ctx, msg); - return hisi_sec_cipher_recv_v3(drv, ctx, msg); + return hisi_sec_cipher_recv(ctx, msg); + return hisi_sec_cipher_recv_v3(ctx, msg); }
-static int digest_send(struct wd_alg_driver *drv, handle_t ctx, void *msg) +static int digest_send(handle_t ctx, void *msg) { struct hisi_qp *qp = (struct hisi_qp *)wd_ctx_get_priv(ctx);
if (qp->q_info.hw_type == HISI_QM_API_VER2_BASE) - return hisi_sec_digest_send(drv, ctx, msg); - return hisi_sec_digest_send_v3(drv, ctx, msg); + return hisi_sec_digest_send(ctx, msg); + return hisi_sec_digest_send_v3(ctx, msg); }
-static int digest_recv(struct wd_alg_driver *drv, handle_t ctx, void *msg) +static int digest_recv(handle_t ctx, void *msg) { struct hisi_qp *qp = (struct hisi_qp *)wd_ctx_get_priv(ctx);
if (qp->q_info.hw_type == HISI_QM_API_VER2_BASE) - return hisi_sec_digest_recv(drv, ctx, msg); - return hisi_sec_digest_recv_v3(drv, ctx, msg); + return hisi_sec_digest_recv(ctx, msg); + return hisi_sec_digest_recv_v3(ctx, msg); }
-static int aead_send(struct wd_alg_driver *drv, handle_t ctx, void *msg) +static int aead_send(handle_t ctx, void *msg) { struct hisi_qp *qp = (struct hisi_qp *)wd_ctx_get_priv(ctx);
if (qp->q_info.hw_type == HISI_QM_API_VER2_BASE) - return hisi_sec_aead_send(drv, ctx, msg); - return hisi_sec_aead_send_v3(drv, ctx, msg); + return hisi_sec_aead_send(ctx, msg); + return hisi_sec_aead_send_v3(ctx, msg); }
-static int aead_recv(struct wd_alg_driver *drv, handle_t ctx, void *msg) +static int aead_recv(handle_t ctx, void *msg) { struct hisi_qp *qp = (struct hisi_qp *)wd_ctx_get_priv(ctx);
if (qp->q_info.hw_type == HISI_QM_API_VER2_BASE) - return hisi_sec_aead_recv(drv, ctx, msg); - return hisi_sec_aead_recv_v3(drv, ctx, msg); + return hisi_sec_aead_recv(ctx, msg); + return hisi_sec_aead_recv_v3(ctx, msg); }
static int hisi_sec_get_usage(void *param) @@ -606,6 +606,7 @@ static int hisi_sec_get_usage(void *param) .alg_name = (sec_alg_name),\ .calc_type = UADK_ALG_HW,\ .priority = 100,\ + .priv_size = sizeof(struct hisi_sec_ctx),\ .queue_num = SEC_CTX_Q_NUM_DEF,\ .op_type_num = 1,\ .fallback = 0,\ @@ -1157,7 +1158,7 @@ static int fill_cipher_bd2(struct wd_cipher_msg *msg, struct hisi_sec_sqe *sqe) return 0; }
-static int hisi_sec_cipher_send(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg) +static int hisi_sec_cipher_send(handle_t ctx, void *wd_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct wd_cipher_msg *msg = wd_msg; @@ -1202,7 +1203,7 @@ static int hisi_sec_cipher_send(struct wd_alg_driver *drv, handle_t ctx, void *w return 0; }
-static int hisi_sec_cipher_recv(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg) +int hisi_sec_cipher_recv(handle_t ctx, void *wd_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct wd_cipher_msg *recv_msg = wd_msg; @@ -1360,7 +1361,7 @@ static int fill_cipher_bd3(struct wd_cipher_msg *msg, struct hisi_sec_sqe3 *sqe) return 0; }
-static int hisi_sec_cipher_send_v3(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg) +static int hisi_sec_cipher_send_v3(handle_t ctx, void *wd_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct wd_cipher_msg *msg = wd_msg; @@ -1450,7 +1451,7 @@ static void parse_cipher_bd3(struct hisi_qp *qp, struct hisi_sec_sqe3 *sqe, dump_sec_msg(temp_msg, "cipher"); }
-static int hisi_sec_cipher_recv_v3(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg) +int hisi_sec_cipher_recv_v3(handle_t ctx, void *wd_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct wd_cipher_msg *recv_msg = wd_msg; @@ -1704,7 +1705,7 @@ static int digest_len_check(struct wd_digest_msg *msg, enum sec_bd_type type) return 0; }
-static int hisi_sec_digest_send(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg) +static int hisi_sec_digest_send(handle_t ctx, void *wd_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct wd_digest_msg *msg = wd_msg; @@ -1771,7 +1772,7 @@ put_sgl: return ret; }
-static int hisi_sec_digest_recv(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg) +int hisi_sec_digest_recv(handle_t ctx, void *wd_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct wd_digest_msg *recv_msg = wd_msg; @@ -1948,7 +1949,7 @@ static void fill_digest_v3_scene(struct hisi_sec_sqe3 *sqe, sqe->bd_param |= (__u16)(de | scene); }
-static int hisi_sec_digest_send_v3(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg) +static int hisi_sec_digest_send_v3(handle_t ctx, void *wd_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct wd_digest_msg *msg = wd_msg; @@ -2047,7 +2048,7 @@ static void parse_digest_bd3(struct hisi_qp *qp, struct hisi_sec_sqe3 *sqe, dump_sec_msg(temp_msg, "digest"); }
-static int hisi_sec_digest_recv_v3(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg) +int hisi_sec_digest_recv_v3(handle_t ctx, void *wd_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct wd_digest_msg *recv_msg = wd_msg; @@ -2540,7 +2541,7 @@ static int aead_msg_state_check(struct wd_aead_msg *msg) return 0; }
-static int hisi_sec_aead_send(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg) +static int hisi_sec_aead_send(handle_t ctx, void *wd_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct wd_aead_msg *msg = wd_msg; @@ -2678,7 +2679,7 @@ static bool soft_compute_check(struct hisi_qp *qp, struct wd_aead_msg *msg) return false; }
-static int hisi_sec_aead_recv(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg) +int hisi_sec_aead_recv(handle_t ctx, void *wd_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct wd_aead_msg *recv_msg = wd_msg; @@ -2945,7 +2946,7 @@ static int fill_aead_bd3(struct wd_aead_msg *msg, struct hisi_sec_sqe3 *sqe) return 0; }
-static int hisi_sec_aead_send_v3(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg) +static int hisi_sec_aead_send_v3(handle_t ctx, void *wd_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct wd_aead_msg *msg = wd_msg; @@ -3049,7 +3050,7 @@ static void parse_aead_bd3(struct hisi_qp *qp, struct hisi_sec_sqe3 *sqe, dump_sec_msg(temp_msg, "aead"); }
-static int hisi_sec_aead_recv_v3(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg) +int hisi_sec_aead_recv_v3(handle_t ctx, void *wd_msg) { handle_t h_qp = (handle_t)wd_ctx_get_priv(ctx); struct wd_aead_msg *recv_msg = wd_msg; @@ -3077,11 +3078,11 @@ static int hisi_sec_aead_recv_v3(struct wd_alg_driver *drv, handle_t ctx, void * return 0; }
-static int hisi_sec_init(struct wd_alg_driver *drv, void *conf) +static int hisi_sec_init(void *conf, void *priv) { struct wd_ctx_config_internal *config = conf; + struct hisi_sec_ctx *sec_ctx = priv; struct hisi_qm_priv qm_priv; - struct hisi_sec_ctx *priv; handle_t h_qp = 0; handle_t h_ctx; __u32 i, j; @@ -3091,10 +3092,6 @@ static int hisi_sec_init(struct wd_alg_driver *drv, void *conf) return -WD_EINVAL; }
- priv = malloc(sizeof(struct hisi_sec_ctx)); - if (!priv) - return -WD_EINVAL; - qm_priv.sqe_size = sizeof(struct hisi_sec_sqe); /* allocate qp for each context */ for (i = 0; i < config->ctx_num; i++) { @@ -3111,8 +3108,7 @@ static int hisi_sec_init(struct wd_alg_driver *drv, void *conf) goto out; config->ctxs[i].sqn = qm_priv.sqn; } - memcpy(&priv->config, config, sizeof(struct wd_ctx_config_internal)); - drv->priv = priv; + memcpy(&sec_ctx->config, config, sizeof(struct wd_ctx_config_internal));
return 0;
@@ -3121,27 +3117,26 @@ out: h_qp = (handle_t)wd_ctx_get_priv(config->ctxs[j].ctx); hisi_qm_free_qp(h_qp); } - free(priv); return -WD_EINVAL; }
-static void hisi_sec_exit(struct wd_alg_driver *drv) +static void hisi_sec_exit(void *priv) { - if(!drv || !drv->priv) - return; - - struct hisi_sec_ctx *priv = (struct hisi_sec_ctx *)drv->priv; + struct hisi_sec_ctx *sec_ctx = priv; struct wd_ctx_config_internal *config; handle_t h_qp; __u32 i;
- config = &priv->config; + if (!priv) { + WD_ERR("invalid: input parameter is NULL!\n"); + return; + } + + config = &sec_ctx->config; for (i = 0; i < config->ctx_num; i++) { h_qp = (handle_t)wd_ctx_get_priv(config->ctxs[i].ctx); hisi_qm_free_qp(h_qp); } - free(priv); - drv->priv = NULL; }
#ifdef WD_STATIC_DRV diff --git a/drv/isa_ce_sm3.c b/drv/isa_ce_sm3.c index 54c2a9e..c8812df 100644 --- a/drv/isa_ce_sm3.c +++ b/drv/isa_ce_sm3.c @@ -22,10 +22,10 @@ typedef void (sm3_ce_block_fn)(__u32 word_reg[SM3_STATE_WORDS], const unsigned char *src, size_t blocks);
-static int sm3_ce_drv_init(struct wd_alg_driver *drv, void *conf); -static void sm3_ce_drv_exit(struct wd_alg_driver *drv); -static int sm3_ce_drv_send(struct wd_alg_driver *drv, handle_t ctx, void *digest_msg); -static int sm3_ce_drv_recv(struct wd_alg_driver *drv, handle_t ctx, void *digest_msg); +static int sm3_ce_drv_init(void *conf, void *priv); +static void sm3_ce_drv_exit(void *priv); +static int sm3_ce_drv_send(handle_t ctx, void *digest_msg); +static int sm3_ce_drv_recv(handle_t ctx, void *digest_msg); static int sm3_ce_get_usage(void *param);
static struct wd_alg_driver sm3_ce_alg_driver = { @@ -33,6 +33,7 @@ static struct wd_alg_driver sm3_ce_alg_driver = { .alg_name = "sm3", .calc_type = UADK_ALG_CE_INSTR, .priority = 200, + .priv_size = sizeof(struct sm3_ce_drv_ctx), .queue_num = 1, .op_type_num = 1, .fallback = 0, @@ -337,7 +338,7 @@ static int do_hmac_sm3_ce(struct wd_digest_msg *msg, __u8 *out_hmac) return WD_SUCCESS; }
-static int sm3_ce_drv_send(struct wd_alg_driver *drv, handle_t ctx, void *digest_msg) +static int sm3_ce_drv_send(handle_t ctx, void *digest_msg) { struct wd_digest_msg *msg = (struct wd_digest_msg *)digest_msg; __u8 digest[SM3_DIGEST_SIZE] = {0}; @@ -365,38 +366,26 @@ static int sm3_ce_drv_send(struct wd_alg_driver *drv, handle_t ctx, void *digest return ret; }
-static int sm3_ce_drv_recv(struct wd_alg_driver *drv, handle_t ctx, void *digest_msg) +static int sm3_ce_drv_recv(handle_t ctx, void *digest_msg) { return WD_SUCCESS; }
-static int sm3_ce_drv_init(struct wd_alg_driver *drv, void *conf) +static int sm3_ce_drv_init(void *conf, void *priv) { - struct wd_ctx_config_internal *config = (struct wd_ctx_config_internal *)conf; - struct sm3_ce_drv_ctx *priv; + struct wd_ctx_config_internal *config = conf; + struct sm3_ce_drv_ctx *sctx = priv;
/* Fallback init is NULL */ - if (!drv || !conf) + if (!conf || !priv) return 0;
- priv = malloc(sizeof(struct sm3_ce_drv_ctx)); - if (!priv) - return -WD_EINVAL; - config->epoll_en = 0; - memcpy(&priv->config, config, sizeof(struct wd_ctx_config_internal)); - drv->priv = priv; + memcpy(&sctx->config, config, sizeof(struct wd_ctx_config_internal));
return WD_SUCCESS; }
-static void sm3_ce_drv_exit(struct wd_alg_driver *drv) +static void sm3_ce_drv_exit(void *priv) { - if(!drv || !drv->priv) - return; - - struct sm3_ce_drv_ctx *sctx = (struct sm3_ce_drv_ctx *)drv->priv; - - free(sctx); - drv->priv = NULL; } diff --git a/drv/isa_ce_sm4.c b/drv/isa_ce_sm4.c index 5e448fa..4b2f9cf 100644 --- a/drv/isa_ce_sm4.c +++ b/drv/isa_ce_sm4.c @@ -31,35 +31,23 @@ ((p)[0] = (__u8)((v) >> 24), (p)[1] = (__u8)((v) >> 16), \ (p)[2] = (__u8)((v) >> 8), (p)[3] = (__u8)(v))
-static int isa_ce_init(struct wd_alg_driver *drv, void *conf) +static int isa_ce_init(void *conf, void *priv) { struct wd_ctx_config_internal *config = conf; - struct sm4_ce_drv_ctx *priv; + struct sm4_ce_drv_ctx *sctx = priv;
/* Fallback init is NULL */ - if (!drv || !conf) + if (!conf || !priv) return 0;
- priv = malloc(sizeof(struct sm4_ce_drv_ctx)); - if (!priv) - return -WD_EINVAL; - config->epoll_en = 0; - memcpy(&priv->config, config, sizeof(struct wd_ctx_config_internal)); - drv->priv = priv; + memcpy(&sctx->config, config, sizeof(struct wd_ctx_config_internal));
- return WD_SUCCESS; + return 0; }
-static void isa_ce_exit(struct wd_alg_driver *drv) +static void isa_ce_exit(void *priv) { - if(!drv || !drv->priv) - return; - - struct sm4_ce_drv_ctx *sctx = (struct sm4_ce_drv_ctx *)drv->priv; - - free(sctx); - drv->priv = NULL; }
/* increment upper 96 bits of 128-bit counter by 1 */ @@ -333,7 +321,7 @@ static int sm4_xts_decrypt(struct wd_cipher_msg *msg, const struct SM4_KEY *rkey return 0; }
-static int isa_ce_cipher_send(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg) +static int isa_ce_cipher_send(handle_t ctx, void *wd_msg) { struct wd_cipher_msg *msg = wd_msg; struct SM4_KEY rkey; @@ -399,19 +387,19 @@ static int isa_ce_cipher_send(struct wd_alg_driver *drv, handle_t ctx, void *wd_ return ret; }
-static int isa_ce_cipher_recv(struct wd_alg_driver *drv, handle_t ctx, void *wd_msg) +static int isa_ce_cipher_recv(handle_t ctx, void *wd_msg) { return 0; }
-static int cipher_send(struct wd_alg_driver *drv, handle_t ctx, void *msg) +static int cipher_send(handle_t ctx, void *msg) { - return isa_ce_cipher_send(drv, ctx, msg); + return isa_ce_cipher_send(ctx, msg); }
-static int cipher_recv(struct wd_alg_driver *drv, handle_t ctx, void *msg) +static int cipher_recv(handle_t ctx, void *msg) { - return isa_ce_cipher_recv(drv, ctx, msg); + return isa_ce_cipher_recv(ctx, msg); }
#define GEN_CE_ALG_DRIVER(ce_alg_name, alg_type) \ @@ -420,6 +408,7 @@ static int cipher_recv(struct wd_alg_driver *drv, handle_t ctx, void *msg) .alg_name = (ce_alg_name),\ .calc_type = UADK_ALG_CE_INSTR,\ .priority = 200,\ + .priv_size = sizeof(struct sm4_ce_drv_ctx),\ .op_type_num = 1,\ .fallback = 0,\ .init = isa_ce_init,\ diff --git a/include/wd_alg.h b/include/wd_alg.h index aba855d..4c9f422 100644 --- a/include/wd_alg.h +++ b/include/wd_alg.h @@ -84,7 +84,8 @@ enum alg_dev_type { * execute the algorithm task * @op_type_num: number of modes in which the device executes the * algorithm business and requires queues to be executed separately - * @priv: pointer of priv ctx + * @priv_size: parameter memory size passed between the internal + * interfaces of the driver * @fallback: soft calculation driver handle when performing soft * calculation supplement * @init: callback interface for initializing device drivers @@ -104,38 +105,18 @@ struct wd_alg_driver { int calc_type; int queue_num; int op_type_num; - void *priv; + int priv_size; handle_t fallback;
- int (*init)(struct wd_alg_driver *drv, void *conf); - void (*exit)(struct wd_alg_driver *drv); - int (*send)(struct wd_alg_driver *drv, handle_t ctx, void *drv_msg); - int (*recv)(struct wd_alg_driver *drv, handle_t ctx, void *drv_msg); + int (*init)(void *conf, void *priv); + void (*exit)(void *priv); + int (*send)(handle_t ctx, void *drv_msg); + int (*recv)(handle_t ctx, void *drv_msg); int (*get_usage)(void *param); int (*get_extend_ops)(void *ops); };
-inline int wd_alg_driver_init(struct wd_alg_driver *drv, void *conf) -{ - return drv->init(drv, conf); -} - -inline void wd_alg_driver_exit(struct wd_alg_driver *drv) -{ - drv->exit(drv); -} - -inline int wd_alg_driver_send(struct wd_alg_driver *drv, handle_t ctx, void *msg) -{ - return drv->send(drv, ctx, msg); -} - -inline int wd_alg_driver_recv(struct wd_alg_driver *drv, handle_t ctx, void *msg) -{ - return drv->recv(drv, ctx, msg); -} - -/* +/** * wd_alg_driver_register() - Register a device driver. * @wd_alg_driver: a device driver that supports an algorithm. * diff --git a/include/wd_util.h b/include/wd_util.h index 1040f19..e870776 100644 --- a/include/wd_util.h +++ b/include/wd_util.h @@ -118,8 +118,8 @@ struct wd_ctx_attr { };
struct wd_msg_handle { - int (*send)(struct wd_alg_driver *drv, handle_t ctx, void *drv_msg); - int (*recv)(struct wd_alg_driver *drv, handle_t ctx, void *drv_msg); + int (*send)(handle_t sess, void *msg); + int (*recv)(handle_t sess, void *msg); };
struct wd_init_attrs { @@ -376,7 +376,6 @@ int wd_set_epoll_en(const char *var_name, bool *epoll_en);
/** * wd_handle_msg_sync() - recv msg from hardware - * @drv: the driver to handle msg. * @msg_handle: callback of msg handle ops. * @ctx: the handle of context. * @msg: the msg of task. @@ -385,8 +384,8 @@ int wd_set_epoll_en(const char *var_name, bool *epoll_en); * * Return 0 if successful or less than 0 otherwise. */ -int wd_handle_msg_sync(struct wd_alg_driver *drv, struct wd_msg_handle *msg_handle, - handle_t ctx, void *msg, __u64 *balance, bool epoll_en); +int wd_handle_msg_sync(struct wd_msg_handle *msg_handle, handle_t ctx, + void *msg, __u64 *balance, bool epoll_en);
/** * wd_init_check() - Check input parameters for wd_<alg>_init. @@ -484,13 +483,14 @@ void wd_alg_drv_unbind(struct wd_alg_driver *drv); * to the obtained queue resource and the applied driver. * @config: device resources requested by the current algorithm. * @driver: device driver for the current algorithm application. + * @drv_priv: the parameter pointer of the current device driver. * * Return 0 if succeed and other error number if fail. */ int wd_alg_init_driver(struct wd_ctx_config_internal *config, - struct wd_alg_driver *driver); + struct wd_alg_driver *driver, void **drv_priv); void wd_alg_uninit_driver(struct wd_ctx_config_internal *config, - struct wd_alg_driver *driver); + struct wd_alg_driver *driver, void **drv_priv);
/** * wd_dlopen_drv() - Open the dynamic library file of the device driver. diff --git a/wd_aead.c b/wd_aead.c index 061e768..1a1e381 100644 --- a/wd_aead.c +++ b/wd_aead.c @@ -34,6 +34,7 @@ struct wd_aead_setting { struct wd_sched sched; struct wd_alg_driver *driver; struct wd_async_msg_pool pool; + void *priv; void *dlhandle; void *dlh_list; } wd_aead_setting; @@ -465,7 +466,8 @@ static int wd_aead_init_nolock(struct wd_ctx_config *config, struct wd_sched *sc goto out_clear_sched;
ret = wd_alg_init_driver(&wd_aead_setting.config, - wd_aead_setting.driver); + wd_aead_setting.driver, + &wd_aead_setting.priv); if (ret) goto out_clear_pool;
@@ -514,30 +516,21 @@ out_clear_init: return ret; }
-static int wd_aead_uninit_nolock(void) +static void wd_aead_uninit_nolock(void) { - enum wd_status status; - - wd_alg_get_init(&wd_aead_setting.status, &status); - if (status == WD_UNINIT) - return -WD_EINVAL; - wd_uninit_async_request_pool(&wd_aead_setting.pool); wd_clear_sched(&wd_aead_setting.sched); wd_alg_uninit_driver(&wd_aead_setting.config, - wd_aead_setting.driver); - - return 0; + wd_aead_setting.driver, + &wd_aead_setting.priv); }
void wd_aead_uninit(void) { - int ret; - - ret = wd_aead_uninit_nolock(); - if (ret) + if (!wd_aead_setting.priv) return;
+ wd_aead_uninit_nolock(); wd_aead_close_driver(WD_TYPE_V1); wd_alg_clear_init(&wd_aead_setting.status); } @@ -643,12 +636,10 @@ out_uninit:
void wd_aead_uninit2(void) { - int ret; - - ret = wd_aead_uninit_nolock(); - if (ret) + if (!wd_aead_setting.priv) return;
+ wd_aead_uninit_nolock(); wd_alg_attrs_uninit(&wd_aead_init_attrs); wd_alg_drv_unbind(wd_aead_setting.driver); wd_aead_close_driver(WD_TYPE_V2); @@ -735,8 +726,8 @@ static int send_recv_sync(struct wd_ctx_internal *ctx, msg_handle.recv = wd_aead_setting.driver->recv;
pthread_spin_lock(&ctx->lock); - ret = wd_handle_msg_sync(wd_aead_setting.driver, &msg_handle, ctx->ctx, - msg, NULL, wd_aead_setting.config.epoll_en); + ret = wd_handle_msg_sync(&msg_handle, ctx->ctx, msg, NULL, + wd_aead_setting.config.epoll_en); pthread_spin_unlock(&ctx->lock);
return ret; @@ -811,7 +802,7 @@ int wd_do_aead_async(handle_t h_sess, struct wd_aead_req *req) fill_request_msg(msg, req, sess); msg->tag = msg_id;
- ret = wd_alg_driver_send(wd_aead_setting.driver, ctx->ctx, msg); + ret = wd_aead_setting.driver->send(ctx->ctx, msg); if (unlikely(ret < 0)) { if (ret != -WD_EBUSY) WD_ERR("failed to send BD, hw is err!\n"); @@ -860,7 +851,7 @@ int wd_aead_poll_ctx(__u32 idx, __u32 expt, __u32 *count) ctx = config->ctxs + idx;
do { - ret = wd_alg_driver_recv(wd_aead_setting.driver, ctx->ctx, &resp_msg); + ret = wd_aead_setting.driver->recv(ctx->ctx, &resp_msg); if (ret == -WD_EAGAIN) { return ret; } else if (ret < 0) { diff --git a/wd_agg.c b/wd_agg.c index efb25f6..7a4b17c 100644 --- a/wd_agg.c +++ b/wd_agg.c @@ -592,7 +592,8 @@ static int wd_agg_alg_init(struct wd_ctx_config *config, struct wd_sched *sched) if (ret < 0) goto out_clear_sched;
- ret = wd_alg_init_driver(&wd_agg_setting.config, wd_agg_setting.driver); + ret = wd_alg_init_driver(&wd_agg_setting.config, wd_agg_setting.driver, + &wd_agg_setting.priv); if (ret) goto out_clear_pool;
@@ -620,7 +621,8 @@ static int wd_agg_alg_uninit(void) /* Unset config, sched, driver */ wd_clear_sched(&wd_agg_setting.sched);
- wd_alg_uninit_driver(&wd_agg_setting.config, wd_agg_setting.driver); + wd_alg_uninit_driver(&wd_agg_setting.config, wd_agg_setting.driver, + &wd_agg_setting.priv);
return WD_SUCCESS; } @@ -1098,8 +1100,7 @@ static int wd_agg_sync_job(struct wd_agg_sess *sess, struct wd_agg_req *req, msg_handle.recv = wd_agg_setting.driver->recv;
pthread_spin_lock(&ctx->lock); - ret = wd_handle_msg_sync(wd_agg_setting.driver, &msg_handle, ctx->ctx, - msg, NULL, config->epoll_en); + ret = wd_handle_msg_sync(&msg_handle, ctx->ctx, msg, NULL, config->epoll_en); pthread_spin_unlock(&ctx->lock);
return ret; @@ -1200,7 +1201,7 @@ static int wd_agg_async_job(struct wd_agg_sess *sess, struct wd_agg_req *req, bo else fill_request_msg_output(msg, req, sess, false); msg->tag = msg_id; - ret = wd_alg_driver_send(wd_agg_setting.driver, ctx->ctx, msg); + ret = wd_agg_setting.driver->send(ctx->ctx, msg); if (unlikely(ret < 0)) { if (ret != -WD_EBUSY) WD_ERR("wd agg async send err!\n"); @@ -1527,7 +1528,7 @@ static int wd_agg_poll_ctx(__u32 idx, __u32 expt, __u32 *count) ctx = config->ctxs + idx;
do { - ret = wd_alg_driver_recv(wd_agg_setting.driver, ctx->ctx, &resp_msg); + ret = wd_agg_setting.driver->recv(ctx->ctx, &resp_msg); if (ret == -WD_EAGAIN) { return ret; } else if (unlikely(ret < 0)) { diff --git a/wd_cipher.c b/wd_cipher.c index 5999217..239a55c 100644 --- a/wd_cipher.c +++ b/wd_cipher.c @@ -53,6 +53,7 @@ struct wd_cipher_setting { struct wd_sched sched; struct wd_async_msg_pool pool; struct wd_alg_driver *driver; + void *priv; void *dlhandle; void *dlh_list; } wd_cipher_setting; @@ -346,7 +347,8 @@ static int wd_cipher_common_init(struct wd_ctx_config *config, goto out_clear_sched;
ret = wd_alg_init_driver(&wd_cipher_setting.config, - wd_cipher_setting.driver); + wd_cipher_setting.driver, + &wd_cipher_setting.priv); if (ret) goto out_clear_pool;
@@ -363,10 +365,9 @@ out_clear_ctx_config:
static int wd_cipher_common_uninit(void) { - enum wd_status status; + void *priv = wd_cipher_setting.priv;
- wd_alg_get_init(&wd_cipher_setting.status, &status); - if (status == WD_UNINIT) + if (!priv) return -WD_EINVAL;
/* uninit async request pool */ @@ -376,7 +377,8 @@ static int wd_cipher_common_uninit(void) wd_clear_sched(&wd_cipher_setting.sched);
wd_alg_uninit_driver(&wd_cipher_setting.config, - wd_cipher_setting.driver); + wd_cipher_setting.driver, + &wd_cipher_setting.priv);
return 0; } @@ -686,8 +688,8 @@ static int send_recv_sync(struct wd_ctx_internal *ctx, msg_handle.recv = wd_cipher_setting.driver->recv;
wd_ctx_spin_lock(ctx, wd_cipher_setting.driver->calc_type); - ret = wd_handle_msg_sync(wd_cipher_setting.driver, &msg_handle, ctx->ctx, - msg, NULL, wd_cipher_setting.config.epoll_en); + ret = wd_handle_msg_sync(&msg_handle, ctx->ctx, msg, NULL, + wd_cipher_setting.config.epoll_en); wd_ctx_spin_unlock(ctx, wd_cipher_setting.driver->calc_type);
return ret; @@ -762,7 +764,7 @@ int wd_do_cipher_async(handle_t h_sess, struct wd_cipher_req *req) fill_request_msg(msg, req, sess); msg->tag = msg_id;
- ret = wd_alg_driver_send(wd_cipher_setting.driver, ctx->ctx, msg); + ret = wd_cipher_setting.driver->send(ctx->ctx, msg); if (unlikely(ret < 0)) { if (ret != -WD_EBUSY) WD_ERR("wd cipher async send err!\n"); @@ -811,7 +813,7 @@ int wd_cipher_poll_ctx(__u32 idx, __u32 expt, __u32 *count) ctx = config->ctxs + idx;
do { - ret = wd_alg_driver_recv(wd_cipher_setting.driver, ctx->ctx, &resp_msg); + ret = wd_cipher_setting.driver->recv(ctx->ctx, &resp_msg); if (ret == -WD_EAGAIN) return ret; else if (ret < 0) { diff --git a/wd_comp.c b/wd_comp.c index 4768642..4914350 100644 --- a/wd_comp.c +++ b/wd_comp.c @@ -47,6 +47,7 @@ struct wd_comp_setting { struct wd_sched sched; struct wd_async_msg_pool pool; struct wd_alg_driver *driver; + void *priv; void *dlhandle; void *dlh_list; } wd_comp_setting; @@ -166,7 +167,8 @@ static int wd_comp_init_nolock(struct wd_ctx_config *config, struct wd_sched *sc goto out_clear_sched;
ret = wd_alg_init_driver(&wd_comp_setting.config, - wd_comp_setting.driver); + wd_comp_setting.driver, + &wd_comp_setting.priv); if (ret) goto out_clear_pool;
@@ -183,10 +185,9 @@ out_clear_ctx_config:
static int wd_comp_uninit_nolock(void) { - enum wd_status status; + void *priv = wd_comp_setting.priv;
- wd_alg_get_init(&wd_comp_setting.status, &status); - if (status == WD_UNINIT) + if (!priv) return -WD_EINVAL;
/* Uninit async request pool */ @@ -196,7 +197,8 @@ static int wd_comp_uninit_nolock(void) wd_clear_sched(&wd_comp_setting.sched);
wd_alg_uninit_driver(&wd_comp_setting.config, - wd_comp_setting.driver); + wd_comp_setting.driver, + &wd_comp_setting.priv);
return 0; } @@ -377,7 +379,7 @@ int wd_comp_poll_ctx(__u32 idx, __u32 expt, __u32 *count) ctx = config->ctxs + idx;
do { - ret = wd_alg_driver_recv(wd_comp_setting.driver, ctx->ctx, &resp_msg); + ret = wd_comp_setting.driver->recv(ctx->ctx, &resp_msg); if (unlikely(ret < 0)) { if (ret == -WD_HW_EACCESS) WD_ERR("wd comp recv hw error!\n"); @@ -611,8 +613,8 @@ static int wd_comp_sync_job(struct wd_comp_sess *sess, msg_handle.recv = wd_comp_setting.driver->recv;
pthread_spin_lock(&ctx->lock); - ret = wd_handle_msg_sync(wd_comp_setting.driver, &msg_handle, ctx->ctx, - msg, NULL, config->epoll_en); + ret = wd_handle_msg_sync(&msg_handle, ctx->ctx, msg, + NULL, config->epoll_en); pthread_spin_unlock(&ctx->lock);
return ret; @@ -866,7 +868,7 @@ int wd_do_comp_async(handle_t h_sess, struct wd_comp_req *req) msg->tag = tag; msg->stream_mode = WD_COMP_STATELESS;
- ret = wd_alg_driver_send(wd_comp_setting.driver, ctx->ctx, msg); + ret = wd_comp_setting.driver->send(ctx->ctx, msg); if (unlikely(ret < 0)) { WD_ERR("wd comp send error, ret = %d!\n", ret); goto fail_with_msg; diff --git a/wd_dh.c b/wd_dh.c index 043c3be..82bbf7f 100644 --- a/wd_dh.c +++ b/wd_dh.c @@ -34,6 +34,7 @@ static struct wd_dh_setting { struct wd_sched sched; struct wd_async_msg_pool pool; struct wd_alg_driver *driver; + void *priv; void *dlhandle; void *dlh_list; } wd_dh_setting; @@ -140,7 +141,8 @@ static int wd_dh_common_init(struct wd_ctx_config *config, struct wd_sched *sche goto out_clear_sched;
ret = wd_alg_init_driver(&wd_dh_setting.config, - wd_dh_setting.driver); + wd_dh_setting.driver, + &wd_dh_setting.priv); if (ret) goto out_clear_pool;
@@ -157,11 +159,10 @@ out_clear_ctx_config:
static int wd_dh_common_uninit(void) { - enum wd_status status; - - wd_alg_get_init(&wd_dh_setting.status, &status); - if (status == WD_UNINIT) + if (!wd_dh_setting.priv) { + WD_ERR("invalid: repeat uninit dh!\n"); return -WD_EINVAL; + }
/* uninit async request pool */ wd_uninit_async_request_pool(&wd_dh_setting.pool); @@ -169,7 +170,8 @@ static int wd_dh_common_uninit(void) /* unset config, sched, driver */ wd_clear_sched(&wd_dh_setting.sched); wd_alg_uninit_driver(&wd_dh_setting.config, - wd_dh_setting.driver); + wd_dh_setting.driver, + &wd_dh_setting.priv);
return WD_SUCCESS; } @@ -385,8 +387,8 @@ int wd_do_dh_sync(handle_t sess, struct wd_dh_req *req) msg_handle.recv = wd_dh_setting.driver->recv;
pthread_spin_lock(&ctx->lock); - ret = wd_handle_msg_sync(wd_dh_setting.driver, &msg_handle, ctx->ctx, - &msg, &balance, wd_dh_setting.config.epoll_en); + ret = wd_handle_msg_sync(&msg_handle, ctx->ctx, &msg, &balance, + wd_dh_setting.config.epoll_en); pthread_spin_unlock(&ctx->lock); if (unlikely(ret)) return ret; @@ -432,7 +434,7 @@ int wd_do_dh_async(handle_t sess, struct wd_dh_req *req) goto fail_with_msg; msg->tag = mid;
- ret = wd_alg_driver_send(wd_dh_setting.driver, ctx->ctx, msg); + ret = wd_dh_setting.driver->send(ctx->ctx, msg); if (unlikely(ret)) { if (ret != -WD_EBUSY) WD_ERR("failed to send dh BD, hw is err!\n"); @@ -483,7 +485,7 @@ int wd_dh_poll_ctx(__u32 idx, __u32 expt, __u32 *count) ctx = config->ctxs + idx;
do { - ret = wd_alg_driver_recv(wd_dh_setting.driver, ctx->ctx, &rcv_msg); + ret = wd_dh_setting.driver->recv(ctx->ctx, &rcv_msg); if (ret == -WD_EAGAIN) { return ret; } else if (unlikely(ret)) { diff --git a/wd_digest.c b/wd_digest.c index 4709a1c..379974a 100644 --- a/wd_digest.c +++ b/wd_digest.c @@ -42,6 +42,7 @@ struct wd_digest_setting { struct wd_sched sched; struct wd_alg_driver *driver; struct wd_async_msg_pool pool; + void *priv; void *dlhandle; void *dlh_list; } wd_digest_setting; @@ -277,7 +278,8 @@ static int wd_digest_init_nolock(struct wd_ctx_config *config, goto out_clear_sched;
ret = wd_alg_init_driver(&wd_digest_setting.config, - wd_digest_setting.driver); + wd_digest_setting.driver, + &wd_digest_setting.priv); if (ret) goto out_clear_pool;
@@ -326,29 +328,21 @@ out_clear_init: return ret; }
-static int wd_digest_uninit_nolock(void) +static void wd_digest_uninit_nolock(void) { - enum wd_status status; - - wd_alg_get_init(&wd_digest_setting.status, &status); - if (status == WD_UNINIT) - return -WD_EINVAL; - wd_uninit_async_request_pool(&wd_digest_setting.pool); wd_clear_sched(&wd_digest_setting.sched); wd_alg_uninit_driver(&wd_digest_setting.config, - wd_digest_setting.driver); - return 0; + wd_digest_setting.driver, + &wd_digest_setting.priv); }
void wd_digest_uninit(void) { - int ret; - - ret = wd_digest_uninit_nolock(); - if (ret) + if (!wd_digest_setting.priv) return;
+ wd_digest_uninit_nolock(); wd_digest_close_driver(WD_TYPE_V1); wd_alg_clear_init(&wd_digest_setting.status); } @@ -450,12 +444,10 @@ out_uninit:
void wd_digest_uninit2(void) { - int ret; - - ret = wd_digest_uninit_nolock(); - if (ret) + if (!wd_digest_setting.priv) return;
+ wd_digest_uninit_nolock(); wd_alg_attrs_uninit(&wd_digest_init_attrs); wd_alg_drv_unbind(wd_digest_setting.driver); wd_digest_close_driver(WD_TYPE_V2); @@ -618,8 +610,8 @@ static int send_recv_sync(struct wd_ctx_internal *ctx, struct wd_digest_sess *ds msg_handle.recv = wd_digest_setting.driver->recv;
wd_ctx_spin_lock(ctx, wd_digest_setting.driver->calc_type); - ret = wd_handle_msg_sync(wd_digest_setting.driver, &msg_handle, ctx->ctx, - msg, NULL, wd_digest_setting.config.epoll_en); + ret = wd_handle_msg_sync(&msg_handle, ctx->ctx, msg, + NULL, wd_digest_setting.config.epoll_en); wd_ctx_spin_unlock(ctx, wd_digest_setting.driver->calc_type); if (unlikely(ret)) return ret; @@ -713,7 +705,7 @@ int wd_do_digest_async(handle_t h_sess, struct wd_digest_req *req) fill_request_msg(msg, req, dsess); msg->tag = msg_id;
- ret = wd_alg_driver_send(wd_digest_setting.driver, ctx->ctx, msg); + ret = wd_digest_setting.driver->send(ctx->ctx, msg); if (unlikely(ret < 0)) { if (ret != -WD_EBUSY) WD_ERR("failed to send BD, hw is err!\n"); @@ -762,7 +754,8 @@ int wd_digest_poll_ctx(__u32 idx, __u32 expt, __u32 *count) ctx = config->ctxs + idx;
do { - ret = wd_alg_driver_recv(wd_digest_setting.driver, ctx->ctx, &recv_msg); + ret = wd_digest_setting.driver->recv(ctx->ctx, + &recv_msg); if (ret == -WD_EAGAIN) { return ret; } else if (ret < 0) { diff --git a/wd_ecc.c b/wd_ecc.c index 4e6a9c5..80a2679 100644 --- a/wd_ecc.c +++ b/wd_ecc.c @@ -65,6 +65,7 @@ static struct wd_ecc_setting { struct wd_sched sched; struct wd_async_msg_pool pool; struct wd_alg_driver *driver; + void *priv; void *dlhandle; void *dlh_list; } wd_ecc_setting; @@ -203,7 +204,8 @@ static int wd_ecc_common_init(struct wd_ctx_config *config, struct wd_sched *sch goto out_clear_sched;
ret = wd_alg_init_driver(&wd_ecc_setting.config, - wd_ecc_setting.driver); + wd_ecc_setting.driver, + &wd_ecc_setting.priv); if (ret) goto out_clear_pool;
@@ -220,11 +222,10 @@ out_clear_ctx_config:
static int wd_ecc_common_uninit(void) { - enum wd_status status; - - wd_alg_get_init(&wd_ecc_setting.status, &status); - if (status == WD_UNINIT) + if (!wd_ecc_setting.priv) { + WD_ERR("invalid: repeat uninit ecc!\n"); return -WD_EINVAL; + }
/* uninit async request pool */ wd_uninit_async_request_pool(&wd_ecc_setting.pool); @@ -232,7 +233,8 @@ static int wd_ecc_common_uninit(void) /* unset config, sched, driver */ wd_clear_sched(&wd_ecc_setting.sched); wd_alg_uninit_driver(&wd_ecc_setting.config, - wd_ecc_setting.driver); + wd_ecc_setting.driver, + &wd_ecc_setting.priv);
return WD_SUCCESS; } @@ -1577,8 +1579,8 @@ int wd_do_ecc_sync(handle_t h_sess, struct wd_ecc_req *req) msg_handle.recv = wd_ecc_setting.driver->recv;
pthread_spin_lock(&ctx->lock); - ret = wd_handle_msg_sync(wd_ecc_setting.driver, &msg_handle, ctx->ctx, &msg, - &balance, wd_ecc_setting.config.epoll_en); + ret = wd_handle_msg_sync(&msg_handle, ctx->ctx, &msg, &balance, + wd_ecc_setting.config.epoll_en); pthread_spin_unlock(&ctx->lock); if (unlikely(ret)) return ret; @@ -2265,7 +2267,7 @@ int wd_do_ecc_async(handle_t sess, struct wd_ecc_req *req) goto fail_with_msg; msg->tag = mid;
- ret = wd_alg_driver_send(wd_ecc_setting.driver, ctx->ctx, msg); + ret = wd_ecc_setting.driver->send(ctx->ctx, msg); if (unlikely(ret)) { if (ret != -WD_EBUSY) WD_ERR("failed to send ecc BD, hw is err!\n"); @@ -2314,7 +2316,7 @@ int wd_ecc_poll_ctx(__u32 idx, __u32 expt, __u32 *count) ctx = config->ctxs + idx;
do { - ret = wd_alg_driver_recv(wd_ecc_setting.driver, ctx->ctx, &recv_msg); + ret = wd_ecc_setting.driver->recv(ctx->ctx, &recv_msg); if (ret == -WD_EAGAIN) { return ret; } else if (ret < 0) { diff --git a/wd_rsa.c b/wd_rsa.c index f0dfb56..b1458da 100644 --- a/wd_rsa.c +++ b/wd_rsa.c @@ -75,6 +75,7 @@ static struct wd_rsa_setting { struct wd_sched sched; struct wd_async_msg_pool pool; struct wd_alg_driver *driver; + void *priv; void *dlhandle; void *dlh_list; } wd_rsa_setting; @@ -180,7 +181,8 @@ static int wd_rsa_common_init(struct wd_ctx_config *config, struct wd_sched *sch goto out_clear_sched;
ret = wd_alg_init_driver(&wd_rsa_setting.config, - wd_rsa_setting.driver); + wd_rsa_setting.driver, + &wd_rsa_setting.priv); if (ret) goto out_clear_pool;
@@ -197,11 +199,10 @@ out_clear_ctx_config:
static int wd_rsa_common_uninit(void) { - enum wd_status status; - - wd_alg_get_init(&wd_rsa_setting.status, &status); - if (status == WD_UNINIT) + if (!wd_rsa_setting.priv) { + WD_ERR("invalid: repeat uninit rsa!\n"); return -WD_EINVAL; + }
/* uninit async request pool */ wd_uninit_async_request_pool(&wd_rsa_setting.pool); @@ -209,7 +210,8 @@ static int wd_rsa_common_uninit(void) /* unset config, sched, driver */ wd_clear_sched(&wd_rsa_setting.sched); wd_alg_uninit_driver(&wd_rsa_setting.config, - wd_rsa_setting.driver); + wd_rsa_setting.driver, + &wd_rsa_setting.priv);
return WD_SUCCESS; } @@ -446,8 +448,8 @@ int wd_do_rsa_sync(handle_t h_sess, struct wd_rsa_req *req) msg_handle.recv = wd_rsa_setting.driver->recv;
pthread_spin_lock(&ctx->lock); - ret = wd_handle_msg_sync(wd_rsa_setting.driver, &msg_handle, ctx->ctx, &msg, - &balance, wd_rsa_setting.config.epoll_en); + ret = wd_handle_msg_sync(&msg_handle, ctx->ctx, &msg, &balance, + wd_rsa_setting.config.epoll_en); pthread_spin_unlock(&ctx->lock); if (unlikely(ret)) return ret; @@ -493,7 +495,7 @@ int wd_do_rsa_async(handle_t sess, struct wd_rsa_req *req) goto fail_with_msg; msg->tag = mid;
- ret = wd_alg_driver_send(wd_rsa_setting.driver, ctx->ctx, msg); + ret = wd_rsa_setting.driver->send(ctx->ctx, msg); if (unlikely(ret)) { if (ret != -WD_EBUSY) WD_ERR("failed to send rsa BD, hw is err!\n"); @@ -542,7 +544,7 @@ int wd_rsa_poll_ctx(__u32 idx, __u32 expt, __u32 *count) ctx = config->ctxs + idx;
do { - ret = wd_alg_driver_recv(wd_rsa_setting.driver, ctx->ctx, &recv_msg); + ret = wd_rsa_setting.driver->recv(ctx->ctx, &recv_msg); if (ret == -WD_EAGAIN) { return ret; } else if (ret < 0) { diff --git a/wd_util.c b/wd_util.c index b25e79f..a0a4304 100644 --- a/wd_util.c +++ b/wd_util.c @@ -1821,8 +1821,8 @@ int wd_set_epoll_en(const char *var_name, bool *epoll_en) return 0; }
-int wd_handle_msg_sync(struct wd_alg_driver *drv, struct wd_msg_handle *msg_handle, - handle_t ctx, void *msg, __u64 *balance, bool epoll_en) +int wd_handle_msg_sync(struct wd_msg_handle *msg_handle, handle_t ctx, + void *msg, __u64 *balance, bool epoll_en) { __u64 timeout = WD_RECV_MAX_CNT_NOSLEEP; __u64 rx_cnt = 0; @@ -1831,7 +1831,7 @@ int wd_handle_msg_sync(struct wd_alg_driver *drv, struct wd_msg_handle *msg_hand if (balance) timeout = WD_RECV_MAX_CNT_SLEEP;
- ret = msg_handle->send(drv, ctx, msg); + ret = msg_handle->send(ctx, msg); if (unlikely(ret < 0)) { WD_ERR("failed to send msg to hw, ret = %d!\n", ret); return ret; @@ -1844,7 +1844,7 @@ int wd_handle_msg_sync(struct wd_alg_driver *drv, struct wd_msg_handle *msg_hand WD_ERR("wd ctx wait timeout(%d)!\n", ret); }
- ret = msg_handle->recv(drv, ctx, msg); + ret = msg_handle->recv(ctx, msg); if (ret != -WD_EAGAIN) { if (unlikely(ret < 0)) { WD_ERR("failed to recv msg: error = %d!\n", ret); @@ -1953,10 +1953,21 @@ static void wd_alg_uninit_fallback(struct wd_alg_driver *fb_driver) }
int wd_alg_init_driver(struct wd_ctx_config_internal *config, - struct wd_alg_driver *driver) + struct wd_alg_driver *driver, void **drv_priv) { + void *priv; int ret;
+ if (!driver->priv_size) { + WD_ERR("invalid: driver priv ctx size is zero!\n"); + return -WD_EINVAL; + } + + /* Init ctx related resources in specific driver */ + priv = calloc(1, driver->priv_size); + if (!priv) + return -WD_ENOMEM; + if (!driver->init) { driver->fallback = 0; WD_ERR("driver have no init interface.\n"); @@ -1964,7 +1975,7 @@ int wd_alg_init_driver(struct wd_ctx_config_internal *config, goto err_alloc; }
- ret = driver->init(driver, config); + ret = driver->init(config, priv); if (ret < 0) { WD_ERR("driver init failed.\n"); goto err_alloc; @@ -1977,23 +1988,31 @@ int wd_alg_init_driver(struct wd_ctx_config_internal *config, WD_ERR("soft alg driver init failed.\n"); } } + *drv_priv = priv;
return 0;
err_alloc: + free(priv); return ret; }
void wd_alg_uninit_driver(struct wd_ctx_config_internal *config, - struct wd_alg_driver *driver) + struct wd_alg_driver *driver, void **drv_priv) { + void *priv = *drv_priv;
- driver->exit(driver); + driver->exit(priv); /* Ctx config just need clear once */ wd_clear_ctx_config(config);
if (driver->fallback) wd_alg_uninit_fallback((struct wd_alg_driver *)driver->fallback); + + if (priv) { + free(priv); + *drv_priv = NULL; + } }
void wd_dlclose_drv(void *dlh_list)
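Taken together with the wd_rsa.c and wd_util.c hunks above, the ownership flow after this revert can be condensed as follows. The *_sketch functions are not real uadk code; pools, scheduler setup and locking are omitted.

/* Condensed sketch of the priv ownership flow; not the literal uadk code. */
static int wd_rsa_common_init_sketch(void)
{
	/*
	 * wd_alg_init_driver() calloc()s driver->priv_size bytes, passes the
	 * buffer to driver->init(config, priv) and returns it through the
	 * third argument, so the rsa layer keeps it in wd_rsa_setting.priv.
	 */
	return wd_alg_init_driver(&wd_rsa_setting.config,
				  wd_rsa_setting.driver,
				  &wd_rsa_setting.priv);
}

static int wd_rsa_common_uninit_sketch(void)
{
	/* A non-NULL priv now doubles as the "already initialized" flag. */
	if (!wd_rsa_setting.priv)
		return -WD_EINVAL;

	/*
	 * wd_alg_uninit_driver() calls driver->exit(priv), frees the buffer
	 * and resets wd_rsa_setting.priv to NULL.
	 */
	wd_alg_uninit_driver(&wd_rsa_setting.config,
			     wd_rsa_setting.driver,
			     &wd_rsa_setting.priv);
	return WD_SUCCESS;
}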
On Tue, 31 Dec 2024 at 17:35, Longfang Liu liulongfang@huawei.com wrote:
This reverts commit 3fc344aa4f7c460269cd0d870fe388f01dfa22a2.
 	ret = wd_alg_init_driver(&wd_rsa_setting.config,
-				 wd_rsa_setting.driver);
+				 wd_rsa_setting.driver,
+				 &wd_rsa_setting.priv);
 	if (ret)
 		goto out_clear_pool;
@@ -197,11 +199,10 @@ out_clear_ctx_config:
 static int wd_rsa_common_uninit(void)
 {
-	enum wd_status status;
-
-	wd_alg_get_init(&wd_rsa_setting.status, &status);
-	if (status == WD_UNINIT)
+	if (!wd_rsa_setting.priv) {
 		WD_ERR("invalid: repeat uninit rsa!\n");
 		return -WD_EINVAL;
 	}

 	/* uninit async request pool */
 	wd_uninit_async_request_pool(&wd_rsa_setting.pool);
@@ -209,7 +210,8 @@ static int wd_rsa_common_uninit(void)
 	/* unset config, sched, driver */
 	wd_clear_sched(&wd_rsa_setting.sched);
 	wd_alg_uninit_driver(&wd_rsa_setting.config,
-			     wd_rsa_setting.driver);
+			     wd_rsa_setting.driver,
+			     &wd_rsa_setting.priv);
 int wd_alg_init_driver(struct wd_ctx_config_internal *config,
-		       struct wd_alg_driver *driver)
+		       struct wd_alg_driver *driver, void **drv_priv)
 {
+	void *priv;
 	int ret;

+	if (!driver->priv_size) {
+		WD_ERR("invalid: driver priv ctx size is zero!\n");
+		return -WD_EINVAL;
+	}
+
+	/* Init ctx related resources in specific driver */
+	priv = calloc(1, driver->priv_size);
+	if (!priv)
+		return -WD_ENOMEM;
+
 	if (!driver->init) {
 		driver->fallback = 0;
 		WD_ERR("driver have no init interface.\n");
@@ -1964,7 +1975,7 @@ int wd_alg_init_driver(struct wd_ctx_config_internal *config,
 		goto err_alloc;
 	}

-	ret = driver->init(driver, config);
+	ret = driver->init(config, priv);
 	if (ret < 0) {
 		WD_ERR("driver init failed.\n");
 		goto err_alloc;
@@ -1977,23 +1988,31 @@ int wd_alg_init_driver(struct wd_ctx_config_internal *config,
 			WD_ERR("soft alg driver init failed.\n");
 		}
 	}
+	*drv_priv = priv;
I don't quite follow this: the priv_size allocation for driver A is saved into setting->priv, so where would the memory allocated for driver B's priv be kept?
Even though our drivers are never unloaded (they stay resident, so there is no lifecycle problem), wouldn't it be better for each driver to keep its own resources?
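For reference, the alternative suggested here would look roughly like the sketch below: each driver allocates and frees its own context through a per-driver priv pointer (as in the code being reverted), instead of the algorithm layer keeping one setting->priv slot. The names and fields are illustrative, and the prototypes are the pre-revert ones.

struct demo_drv_res {
	int inited;	/* illustrative per-driver state */
};

static int demo_self_init(struct wd_alg_driver *drv, void *conf)
{
	struct demo_drv_res *res;

	if (!drv || !conf)
		return 0;

	res = calloc(1, sizeof(*res));
	if (!res)
		return -WD_ENOMEM;

	res->inited = 1;
	drv->priv = res;	/* driver A and driver B each hold their own ctx */

	return 0;
}

static void demo_self_exit(struct wd_alg_driver *drv)
{
	if (!drv || !drv->priv)
		return;

	free(drv->priv);
	drv->priv = NULL;
}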
Unify the software ctx and the hardware ctx in uadk and merge them in the scheduler, so that software and hardware computation can run together (a small configuration sketch follows the diffstat below).
Signed-off-by: Longfang Liu liulongfang@huawei.com --- include/wd_alg.h | 23 +- include/wd_alg_common.h | 58 +++- include/wd_sched.h | 1 + include/wd_util.h | 18 +- wd_alg.c | 63 ++-- wd_util.c | 725 +++++++++++++++++++++++++--------------- 6 files changed, 573 insertions(+), 315 deletions(-)
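As a rough configuration sketch (not code from this patch; the numbers are arbitrary), one op type could chain a hardware ctx set with a CE-instruction ctx set through the new wd_ctx_nums fields:

	struct wd_ctx_nums ce_ctxs = {
		.sync_ctx_num	= 2,
		.async_ctx_num	= 2,
		.ctx_prop	= UADK_CTX_CE_INS,
	};

	struct wd_ctx_nums hw_ctxs = {
		.sync_ctx_num	= 4,
		.async_ctx_num	= 4,
		.ctx_prop	= UADK_CTX_HW,
		.other_ctx	= &ce_ctxs,	/* extra queue type for the same op type */
	};

	struct wd_ctx_params cipher_ctx_params = {
		.op_type_num	= 1,
		.ctx_set_num	= &hw_ctxs,
	};

wd_ctxs_idx_init() in the diff below then walks the other_ctx chain and assigns ctx_begin so the soft ctxs are laid out after the hardware ones.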
diff --git a/include/wd_alg.h b/include/wd_alg.h index 4c9f422..afdab7e 100644 --- a/include/wd_alg.h +++ b/include/wd_alg.h @@ -62,14 +62,23 @@ extern "C" { # define HWCAP2_RNG (1 << 16) #endif
-enum alg_dev_type { - UADK_ALG_SOFT = 0x0, +enum alg_priority { + UADK_ALG_HW = 0x0, UADK_ALG_CE_INSTR = 0x1, UADK_ALG_SVE_INSTR = 0x2, - UADK_ALG_HW = 0x3 + UADK_ALG_SOFT = 0x3 };
-/* +enum alg_drv_type { + ALG_DRV_HW = 0x0, + ALG_DRV_CE_INS, + ALG_DRV_SVE_INS, + ALG_DRV_SOFT, + ALG_DRV_INS, + ALG_DRV_FB, +}; + +/** * @drv_name: name of the current device driver * @alg_name: name of the algorithm supported by the driver * @priority: priority of the type of algorithm supported by the driver @@ -107,6 +116,7 @@ struct wd_alg_driver { int op_type_num; int priv_size; handle_t fallback; + int init_state;
int (*init)(void *conf, void *priv); void (*exit)(void *priv); @@ -155,7 +165,7 @@ struct wd_alg_list { * * Returns the applied algorithm driver, non means error. */ -struct wd_alg_driver *wd_request_drv(const char *alg_name, bool hw_mask); +struct wd_alg_driver *wd_request_drv(const char *alg_name, int drv_type); void wd_release_drv(struct wd_alg_driver *drv);
/* @@ -165,8 +175,7 @@ void wd_release_drv(struct wd_alg_driver *drv); * * Return check result. */ -bool wd_drv_alg_support(const char *alg_name, - struct wd_alg_driver *drv); +bool wd_drv_alg_support(const char *alg_name, void *param);
/* * wd_enable_drv() - Re-enable use of the current device driver. diff --git a/include/wd_alg_common.h b/include/wd_alg_common.h index fd77426..079dfb9 100644 --- a/include/wd_alg_common.h +++ b/include/wd_alg_common.h @@ -41,6 +41,14 @@ extern "C" {
/* Key size of digest */ #define MAX_HMAC_KEY_SIZE 128U +#define MAX_SOFT_QUEUE_LENGTH 1024U + +/* + * The maximum number of queue types + * required for similar algorithms + */ +#define MAX_CTX_OP_TYPE 4U +#define STATUS_ENABLE (void *)0x1
enum alg_task_type { TASK_MIX = 0x0, @@ -60,7 +68,27 @@ enum wd_init_type { WD_TYPE_V2, };
-/* +struct wd_soft_sqe { + __u8 used; + __u8 result; + __u8 complete; + __u32 id; +}; + +/** + * default queue length set to 1024 + */ +struct wd_soft_ctx { + pthread_spinlock_t slock; + __u32 head; + struct wd_soft_sqe qfifo[MAX_SOFT_QUEUE_LENGTH]; + pthread_spinlock_t rlock; + __u32 tail; + __u32 run_num; + void *priv; +}; + +/** * struct wd_ctx - Define one ctx and related type. * @ctx: The ctx itself. * @op_type: Define the operation type of this specific ctx. @@ -72,6 +100,7 @@ struct wd_ctx { handle_t ctx; __u8 op_type; __u8 ctx_mode; + __u8 ctx_type; };
/* @@ -103,16 +132,31 @@ struct wd_ctx_config { struct wd_cap_config *cap; };
-/* +/* 0x0 mean calloc init value */ +enum wd_ctx_property { + UADK_CTX_HW = 0x0, + UADK_CTX_CE_INS = 0x1, + UADK_CTX_SVE_INS = 0x2, + UADK_CTX_SOFT = 0x3, + UADK_CTX_MAX +}; + +/** * struct wd_ctx_nums - Define the ctx sets numbers. * @sync_ctx_num: The ctx numbers which are used for sync mode for each * ctx sets. * @async_ctx_num: The ctx numbers which are used for async mode for each * ctx sets. + * @ctx_prop: Indicates the properties of the current queue + * @ctx_begin: The encoding starting position of the current device ctx + * @other_ctx: Other types of queues configured */ struct wd_ctx_nums { __u32 sync_ctx_num; __u32 async_ctx_num; + __u8 ctx_prop; + __u16 ctx_begin; + struct wd_ctx_nums *other_ctx; };
/* @@ -132,16 +176,16 @@ struct wd_ctx_params { struct wd_cap_config *cap; };
-struct wd_soft_ctx { - void *priv; -}; - struct wd_ctx_internal { - handle_t ctx; __u8 op_type; __u8 ctx_mode; + __u8 ctx_type; + __u8 ctx_used; + handle_t ctx; // if ctx is first will cause problem __u16 sqn; pthread_spinlock_t lock; + struct wd_alg_driver *drv; + void *drv_priv; };
struct wd_ctx_config_internal { diff --git a/include/wd_sched.h b/include/wd_sched.h index be541c6..5a69cb9 100644 --- a/include/wd_sched.h +++ b/include/wd_sched.h @@ -32,6 +32,7 @@ struct sched_params { __u8 mode; __u32 begin; __u32 end; + int ctx_prop; };
typedef int (*user_poll_func)(__u32 pos, __u32 expect, __u32 *count); diff --git a/include/wd_util.h b/include/wd_util.h index e870776..bee7f29 100644 --- a/include/wd_util.h +++ b/include/wd_util.h @@ -124,13 +124,14 @@ struct wd_msg_handle {
struct wd_init_attrs { __u32 sched_type; - const char *alg; - struct wd_alg_driver *driver; + __u32 task_type; + char *alg; struct wd_sched *sched; struct wd_ctx_params *ctx_params; struct wd_ctx_config *ctx_config; wd_alg_init alg_init; wd_alg_poll_ctx alg_poll_ctx; + struct wd_alg_driver *driver; //stub for old code };
/* @@ -171,6 +172,9 @@ void wd_clear_ctx_config(struct wd_ctx_config_internal *in); */ void wd_memset_zero(void *data, __u32 size);
+int wd_ctx_drv_config(char *alg_name, struct wd_ctx_config_internal *ctx_config); +void wd_ctx_drv_deconfig(struct wd_ctx_config_internal *ctx_config); + /* * wd_init_async_request_pool() - Init async message pools. * @pool: Pointer of message pool. @@ -450,6 +454,10 @@ static inline void wd_alg_clear_init(enum wd_status *status) * * Return 0 if succeed and other error number if fail. */ +int wd_ctx_param_init_nw(struct wd_ctx_params *ctx_params, + struct wd_ctx_params *user_ctx_params, + char *alg, int task_type, enum wd_type type, + int max_op_type); int wd_ctx_param_init(struct wd_ctx_params *ctx_params, struct wd_ctx_params *user_ctx_params, struct wd_alg_driver *driver, @@ -470,12 +478,12 @@ void wd_alg_attrs_uninit(struct wd_init_attrs *attrs); /** * wd_alg_drv_bind() - Request the ctxs and initialize the sched_domain * with the given devices list, ctxs number and numa mask. - * @task_type: the type of task specified by the current algorithm. + * @ctx_type: the type of ctx specified by the current algorithm. * @alg_name: the name of the algorithm specified by the task. * * Return device driver if succeed and other NULL if fail. */ -struct wd_alg_driver *wd_alg_drv_bind(int task_type, const char *alg_name); +struct wd_alg_driver *wd_alg_drv_bind(__u8 ctx_prop, char *alg_name); void wd_alg_drv_unbind(struct wd_alg_driver *drv);
/** @@ -487,6 +495,8 @@ void wd_alg_drv_unbind(struct wd_alg_driver *drv); * * Return 0 if succeed and other error number if fail. */ +int wd_alg_init_driver_nw(struct wd_ctx_config_internal *config); +void wd_alg_uninit_driver_nw(struct wd_ctx_config_internal *config); int wd_alg_init_driver(struct wd_ctx_config_internal *config, struct wd_alg_driver *driver, void **drv_priv); void wd_alg_uninit_driver(struct wd_ctx_config_internal *config, diff --git a/wd_alg.c b/wd_alg.c index a33f5d6..bedbfc6 100644 --- a/wd_alg.c +++ b/wd_alg.c @@ -12,7 +12,7 @@ #include <sys/auxv.h>
#include "wd.h" -#include "wd_alg.h" +#include "wd_alg_common.h"
#define SYS_CLASS_DIR "/sys/class/uacce" #define SVA_FILE_NAME "flags" @@ -271,21 +271,26 @@ struct wd_alg_list *wd_get_alg_head(void) return &alg_list_head; }
-bool wd_drv_alg_support(const char *alg_name, - struct wd_alg_driver *drv) +bool wd_drv_alg_support(const char *alg_name, void *param) { + struct wd_ctx_config_internal *config = param; struct wd_alg_list *head = &alg_list_head; struct wd_alg_list *pnext = head->next; + struct wd_alg_driver *drv; + __u32 i;
- if (!alg_name || !drv) + if (!alg_name || !config) return false;
- while (pnext) { - if (!strcmp(alg_name, pnext->alg_name) && - !strcmp(drv->drv_name, pnext->drv_name)) { - return true; + for (i = 0; i < config->ctx_num; i++) { + drv = config->ctxs[i].drv; + while (pnext) { + if (!strcmp(alg_name, pnext->alg_name) && + !strcmp(drv->drv_name, pnext->drv_name)) { + return true; + } + pnext = pnext->next; } - pnext = pnext->next; }
return false; @@ -331,7 +336,7 @@ void wd_disable_drv(struct wd_alg_driver *drv) pthread_mutex_unlock(&mutex); }
-struct wd_alg_driver *wd_request_drv(const char *alg_name, bool hw_mask) +struct wd_alg_driver *wd_request_drv(const char *alg_name, int drv_type) { struct wd_alg_list *head = &alg_list_head; struct wd_alg_list *pnext = head->next; @@ -352,18 +357,32 @@ struct wd_alg_driver *wd_request_drv(const char *alg_name, bool hw_mask) /* Check the list to get an best driver */ pthread_mutex_lock(&mutex); while (pnext) { - /* hw_mask true mean not to used hardware dev */ - if ((hw_mask && pnext->drv->calc_type == UADK_ALG_HW) || - (!hw_mask && pnext->drv->calc_type != UADK_ALG_HW)) { - pnext = pnext->next; - continue; - } - - if (!strcmp(alg_name, pnext->alg_name) && pnext->available && - pnext->drv->priority > tmp_priority) { - tmp_priority = pnext->drv->priority; - select_node = pnext; - drv = pnext->drv; + if (!strcmp(alg_name, pnext->alg_name) && pnext->available) { + /* HW driver mean to used hardware dev */ + if (drv_type == ALG_DRV_HW && pnext->drv->calc_type == UADK_ALG_HW) + select_node = pnext; + /* CE driver mean to used CE dev */ + else if (drv_type == ALG_DRV_CE_INS && pnext->drv->calc_type == UADK_ALG_CE_INSTR) + select_node = pnext; + /* SVE driver mean to used SVE dev */ + else if (drv_type == ALG_DRV_SVE_INS && pnext->drv->calc_type == UADK_ALG_SVE_INSTR) + select_node = pnext; + /* INS driver mean to used CE and SVE dev */ + else if (drv_type == ALG_DRV_INS && (pnext->drv->calc_type == UADK_ALG_CE_INSTR || + pnext->drv->calc_type == UADK_ALG_SVE_INSTR)) + select_node = pnext; + /* Soft driver mean to used Soft, CE and SVE dev */ + else if (drv_type == ALG_DRV_SOFT && pnext->drv->calc_type != UADK_ALG_HW) + select_node = pnext; + /* Fallback driver mean to used Soft or CE dev */ + else if (drv_type == ALG_DRV_FB && (pnext->drv->calc_type == UADK_ALG_SOFT || + pnext->drv->calc_type == UADK_ALG_CE_INSTR)) + select_node = pnext; + + if (select_node && select_node->drv->priority > tmp_priority) { + drv = select_node->drv; + tmp_priority = select_node->drv->priority; + } } pnext = pnext->next; } diff --git a/wd_util.c b/wd_util.c index a0a4304..848ff15 100644 --- a/wd_util.c +++ b/wd_util.c @@ -168,6 +168,7 @@ static void clone_ctx_to_internal(struct wd_ctx *ctx, ctx_in->ctx = ctx->ctx; ctx_in->op_type = ctx->op_type; ctx_in->ctx_mode = ctx->ctx_mode; + ctx_in->ctx_type = ctx->ctx_type; }
static int wd_shm_create(struct wd_ctx_config_internal *in) @@ -236,9 +237,8 @@ int wd_init_ctx_config(struct wd_ctx_config_internal *in,
for (i = 0; i < cfg->ctx_num; i++) { if (!cfg->ctxs[i].ctx) { - WD_ERR("invalid: ctx is NULL!\n"); - ret = -WD_EINVAL; - goto err_out; + WD_ERR("invalid: ctx<%u> is NULL!\n", i); + break; } clone_ctx_to_internal(cfg->ctxs + i, ctxs + i); ret = pthread_spin_init(&ctxs[i].lock, PTHREAD_PROCESS_SHARED); @@ -1790,7 +1790,7 @@ int wd_check_ctx(struct wd_ctx_config_internal *config, __u8 mode, __u32 idx) }
ctx = config->ctxs + idx; - if (ctx->ctx_mode != mode) { + if (ctx->ctx_type == UADK_CTX_HW && ctx->ctx_mode != mode) { WD_ERR("invalid: ctx(%u) mode is %hhu!\n", idx, ctx->ctx_mode); return -WD_EINVAL; } @@ -1889,6 +1889,38 @@ int wd_init_param_check(struct wd_ctx_config *config, struct wd_sched *sched) return 0; }
+int wd_alg_try_init(enum wd_status *status) +{ + enum wd_status expected; + __u32 count = 0; + bool ret; + + /* + * Here is aimed to protect the security of the initialization interface + * in the multi-thread scenario. Only one thread can get the WD_INITING + * status to initialize algorithm. Other thread will wait for the result. + * And the algorithm initialization interfaces is a liner process. + * So the initing thread will return a result to notify other thread go on. + */ + do { + expected = WD_UNINIT; + ret = __atomic_compare_exchange_n(status, &expected, WD_INITING, true, + __ATOMIC_RELAXED, __ATOMIC_RELAXED); + if (expected == WD_INIT) { + WD_ERR("The algorithm has been initialized!\n"); + return -WD_EEXIST; + } + usleep(WD_INIT_SLEEP_UTIME); + + if (US2S(WD_INIT_SLEEP_UTIME * ++count) >= WD_INIT_RETRY_TIMEOUT) { + WD_ERR("The algorithm initialize wait timeout!\n"); + return -WD_ETIMEDOUT; + } + } while (!ret); + + return 0; +} + static void wd_get_alg_type(const char *alg_name, char *alg_type) { int i; @@ -1937,6 +1969,7 @@ static int wd_alg_init_fallback(struct wd_alg_driver *fb_driver) return -WD_EINVAL; }
+ WD_ERR("debug: call function: %s!\n", __func__); fb_driver->init(NULL, NULL);
return 0; @@ -1952,12 +1985,20 @@ static void wd_alg_uninit_fallback(struct wd_alg_driver *fb_driver) fb_driver->exit(NULL); }
-int wd_alg_init_driver(struct wd_ctx_config_internal *config, - struct wd_alg_driver *driver, void **drv_priv) +static int wd_ctx_init_driver(struct wd_ctx_config_internal *config, + struct wd_ctx_internal *ctx_config) { - void *priv; + struct wd_alg_driver *driver = ctx_config->drv; + void *priv = ctx_config->drv_priv; int ret;
+ if (!driver) + return 0; + + /* Prevent repeated initialization */ + if (driver->init_state) + return 0; + if (!driver->priv_size) { WD_ERR("invalid: driver priv ctx size is zero!\n"); return -WD_EINVAL; @@ -1980,6 +2021,7 @@ int wd_alg_init_driver(struct wd_ctx_config_internal *config, WD_ERR("driver init failed.\n"); goto err_alloc; } + driver->init_state = 1;
if (driver->fallback) { ret = wd_alg_init_fallback((struct wd_alg_driver *)driver->fallback); @@ -1988,21 +2030,30 @@ int wd_alg_init_driver(struct wd_ctx_config_internal *config, WD_ERR("soft alg driver init failed.\n"); } } - *drv_priv = priv;
return 0;
err_alloc: free(priv); + ctx_config->drv_priv = NULL; return ret; }
-void wd_alg_uninit_driver(struct wd_ctx_config_internal *config, - struct wd_alg_driver *driver, void **drv_priv) +static void wd_ctx_uninit_driver(struct wd_ctx_config_internal *config, + struct wd_ctx_internal *ctx_config) { - void *priv = *drv_priv; + struct wd_alg_driver *driver = ctx_config->drv; + void *priv = ctx_config->drv_priv; + + if (!driver) + return; + + /* Prevent repeated uninitialization */ + if (!driver->init_state) + return;
driver->exit(priv); + driver->init_state = 0; /* Ctx config just need clear once */ wd_clear_ctx_config(config);
@@ -2011,8 +2062,51 @@ void wd_alg_uninit_driver(struct wd_ctx_config_internal *config,
if (priv) { free(priv); - *drv_priv = NULL; + ctx_config->drv_priv = NULL; + } +} + +int wd_alg_init_driver(struct wd_ctx_config_internal *config, + struct wd_alg_driver *driver, void **drv_priv) +{ + return 0; +} + +int wd_alg_init_driver_nw(struct wd_ctx_config_internal *config) +{ + __u32 i, j; + int ret; + + WD_ERR("debug: call function: %s!\n", __func__); + for (i = 0; i < config->ctx_num; i++) { + if (!config->ctxs[i].ctx) + continue; + ret = wd_ctx_init_driver(config, &config->ctxs[i]); + if (ret) + goto init_err; } + + return 0; + +init_err: + for (j = 0; j < i; j++) + wd_ctx_uninit_driver(config, &config->ctxs[j]); + + return ret; +} + +void wd_alg_uninit_driver(struct wd_ctx_config_internal *config, + struct wd_alg_driver *driver, void **drv_priv) +{ +} + +void wd_alg_uninit_driver_nw(struct wd_ctx_config_internal *config) +{ + __u32 i; + + for (i = 0; i < config->ctx_num; i++) + wd_ctx_uninit_driver(config, &config->ctxs[i]); + }
void wd_dlclose_drv(void *dlh_list) @@ -2140,6 +2234,14 @@ int wd_ctx_param_init(struct wd_ctx_params *ctx_params, struct wd_ctx_params *user_ctx_params, struct wd_alg_driver *driver, enum wd_type type, int max_op_type) +{ + return 0; +} + +int wd_ctx_param_init_nw(struct wd_ctx_params *ctx_params, + struct wd_ctx_params *user_ctx_params, + char *alg, int task_type, enum wd_type type, + int max_op_type) { const char *env_name = wd_env_name[type]; const char *var_s; @@ -2153,9 +2255,9 @@ int wd_ctx_param_init(struct wd_ctx_params *ctx_params,
/* Only hw driver support environment variable */ var_s = secure_getenv(env_name); - if (var_s && strlen(var_s) && driver->calc_type == UADK_ALG_HW) { + if (var_s && strlen(var_s) && task_type <= TASK_HW) { /* environment variable has the highest priority */ - ret = wd_env_set_ctx_nums(driver->alg_name, env_name, var_s, + ret = wd_env_set_ctx_nums(alg, env_name, var_s, ctx_params, max_op_type); if (ret) { WD_ERR("fail to init ctx nums from %s!\n", env_name); @@ -2166,33 +2268,28 @@ int wd_ctx_param_init(struct wd_ctx_params *ctx_params, /* environment variable is not set, try to use user_ctx_params first */ if (user_ctx_params) { copy_bitmask_to_bitmask(user_ctx_params->bmp, ctx_params->bmp); - ctx_params->cap = user_ctx_params->cap; - ctx_params->ctx_set_num = user_ctx_params->ctx_set_num; - ctx_params->op_type_num = user_ctx_params->op_type_num; - if (ctx_params->op_type_num > (__u32)max_op_type) { + if (user_ctx_params->op_type_num > (__u32)max_op_type) { WD_ERR("fail to check user op type numbers.\n"); numa_free_nodemask(ctx_params->bmp); return -WD_EINVAL; } + ctx_params->cap = user_ctx_params->cap; + ctx_params->ctx_set_num = user_ctx_params->ctx_set_num; + ctx_params->op_type_num = user_ctx_params->op_type_num;
return 0; } - - /* user_ctx_params is also not set, use driver's defalut queue_num */ - numa_bitmask_setall(ctx_params->bmp); - for (i = 0; i < driver->op_type_num; i++) { - ctx_params->ctx_set_num[i].sync_ctx_num = driver->queue_num; - ctx_params->ctx_set_num[i].async_ctx_num = driver->queue_num; - } }
- ctx_params->op_type_num = driver->op_type_num; - if (ctx_params->op_type_num > (__u32)max_op_type) { - WD_ERR("fail to check driver op type numbers.\n"); - numa_free_nodemask(ctx_params->bmp); - return -WD_EAGAIN; + /* user_ctx_params is also not set, use defalut queue_num max_op_type */ + numa_bitmask_setall(ctx_params->bmp); + for (i = 0; i < max_op_type; i++) { + ctx_params->ctx_set_num[i].sync_ctx_num = max_op_type; + ctx_params->ctx_set_num[i].async_ctx_num = max_op_type; }
+ ctx_params->op_type_num = max_op_type; + return 0; }
@@ -2291,6 +2388,7 @@ void *wd_dlopen_drv(const char *cust_lib_dir) node->dlhandle = dlopen(lib_path, RTLD_NODELETE | RTLD_NOW); if (!node->dlhandle) { free(node); + WD_ERR("UADK driver so open failed, error number: %s\n", dlerror()); /* there are many other files need to skip */ continue; } @@ -2319,48 +2417,57 @@ free_list: return NULL; }
-struct wd_alg_driver *wd_alg_drv_bind(int task_type, const char *alg_name) +struct wd_alg_driver *wd_alg_drv_bind(__u8 ctx_prop, char *alg_name) { - struct wd_alg_driver *set_driver = NULL; + struct wd_alg_driver *fb_drv; struct wd_alg_driver *drv;
- /* Get alg driver and dev name */ - switch (task_type) { - case TASK_INSTR: - drv = wd_request_drv(alg_name, true); + /* Get alg driver from ctx type and alg name */ + switch (ctx_prop) { + case UADK_CTX_HW: + drv = wd_request_drv(alg_name, ALG_DRV_HW); if (!drv) { - WD_ERR("no soft %s driver support\n", alg_name); + WD_ERR("no HW %s driver support\n", alg_name); return NULL; } - set_driver = drv; - set_driver->fallback = 0; + + fb_drv = wd_request_drv(alg_name, ALG_DRV_SOFT); + if (!fb_drv) + drv->fallback = 0; + else + drv->fallback = (handle_t)fb_drv; + break; - case TASK_HW: - case TASK_MIX: - drv = wd_request_drv(alg_name, false); + case UADK_CTX_CE_INS: + drv = wd_request_drv(alg_name, ALG_DRV_CE_INS); if (!drv) { - WD_ERR("no HW %s driver support\n", alg_name); + WD_ERR("no CE instr soft %s driver support\n", alg_name); return NULL; } - set_driver = drv; - set_driver->fallback = 0; - if (task_type == TASK_MIX) { - drv = wd_request_drv(alg_name, true); - if (!drv) { - set_driver->fallback = 0; - WD_ERR("no soft %s driver support\n", alg_name); - } else { - set_driver->fallback = (handle_t)drv; - WD_ERR("successful to get soft driver\n"); - } + drv->fallback = 0; + break; + case UADK_CTX_SVE_INS: + drv = wd_request_drv(alg_name, ALG_DRV_SVE_INS); + if (!drv) { + WD_ERR("no SVE instr soft %s driver support\n", alg_name); + return NULL; + } + drv->fallback = 0; + break; + case UADK_CTX_SOFT: + drv = wd_request_drv(alg_name, ALG_DRV_SOFT); + if (!drv) { + WD_ERR("no instr soft %s driver support\n", alg_name); + return NULL; } + drv->fallback = 0; break; default: - WD_ERR("task type error.\n"); - return NULL; + WD_ERR("ctx type error: %d.\n", ctx_prop); + return WD_ERR_PTR(-WD_ENODEV); }
- return set_driver; + return drv; }
void wd_alg_drv_unbind(struct wd_alg_driver *drv) @@ -2376,45 +2483,100 @@ void wd_alg_drv_unbind(struct wd_alg_driver *drv) wd_release_drv(drv); }
-int wd_alg_try_init(enum wd_status *status) +static __u32 wd_ctxs_idx_init(struct wd_init_attrs *attrs, int numa_cnt, int task_type) { - enum wd_status expected; + struct wd_ctx_params *ctx_params = attrs->ctx_params; + int end = ctx_params->op_type_num; + struct wd_ctx_nums *ptr_ctx; __u32 count = 0; - bool ret; + int i, uidx;
- do { - expected = WD_UNINIT; - ret = __atomic_compare_exchange_n(status, &expected, WD_INITING, true, - __ATOMIC_RELAXED, __ATOMIC_RELAXED); - if (expected == WD_INIT) { - WD_ERR("The algorithm has been initialized!\n"); - return -WD_EEXIST; - } - usleep(WD_INIT_SLEEP_UTIME); + if (ctx_params->op_type_num > MAX_CTX_OP_TYPE) { + WD_ERR("invalid: max ctx op type<%u> is wrong!\n", ctx_params->op_type_num); + return 0; + }
- if (US2S(WD_INIT_SLEEP_UTIME * ++count) >= WD_INIT_RETRY_TIMEOUT) { - WD_ERR("The algorithm initialize wait timeout!\n"); - return -WD_ETIMEDOUT; + for (uidx = 0; uidx < UADK_CTX_MAX; uidx++) { + /* If it is a soft computing task, do not use HW queue */ + if (task_type == TASK_INSTR && uidx == UADK_CTX_HW) + continue; + for (i = 0; i < end; i++) { + ptr_ctx = &ctx_params->ctx_set_num[i]; + while (ptr_ctx) { + if (ptr_ctx->ctx_prop == uidx) { + ptr_ctx->ctx_begin = count; + if (uidx == UADK_CTX_HW) { + count += ptr_ctx->sync_ctx_num * numa_cnt; + count += ptr_ctx->async_ctx_num * numa_cnt; + } else { + count += ptr_ctx->sync_ctx_num; + count += ptr_ctx->async_ctx_num; + } + WD_ERR("optype<%u>, prop<%u>, begin<%u>, synx<%u>, async<%u>\n", + i, ptr_ctx->ctx_prop, ptr_ctx->ctx_begin, ptr_ctx->sync_ctx_num, ptr_ctx->async_ctx_num); + } + ptr_ctx = ptr_ctx->other_ctx; + } } - } while (!ret); + }
- return 0; + return count; }
-static __u32 wd_get_ctx_numbers(struct wd_ctx_params ctx_params, int end) +static struct wd_ctx_nums *wd_get_ctx_ptr(struct wd_ctx_params *ctx_params, + __u32 op_type, int ctx_prop) { - __u32 count = 0; - int i; + struct wd_ctx_nums *ptr_ctx;
- for (i = 0; i < end; i++) { - count += ctx_params.ctx_set_num[i].sync_ctx_num; - count += ctx_params.ctx_set_num[i].async_ctx_num; + if (op_type > ctx_params->op_type_num) + return NULL; + + ptr_ctx = &ctx_params->ctx_set_num[op_type]; + while (ptr_ctx) { + if (ptr_ctx->ctx_prop == ctx_prop) + return ptr_ctx; + ptr_ctx = ptr_ctx->other_ctx; }
- return count; + return NULL; +} + +void wd_ctx_drv_deconfig(struct wd_ctx_config_internal *ctx_config) +{ + __u32 i; + + // wd_dlclose_drv after this + for (i = 0; i < ctx_config->ctx_num; i++) + wd_alg_drv_unbind(ctx_config->ctxs[i].drv); + +} + +int wd_ctx_drv_config(char *alg_name, struct wd_ctx_config_internal *ctx_config) +{ + __u32 i, j; + + // wd_dlopen_drv before this + WD_ERR("debug: call function: %s!\n", __func__); + for (i = 0; i < ctx_config->ctx_num; i++) { + ctx_config->ctxs[i].drv = wd_alg_drv_bind(ctx_config->ctxs[i].ctx_type, alg_name); + if (WD_IS_ERR(ctx_config->ctxs[i].drv)) { + continue; + } else if (!ctx_config->ctxs[i].drv) { + WD_ERR("failed to bind %s driver.\n", alg_name); + goto bind_err; + } + } + + return 0; + +bind_err: + for (j = 0; j < i; j++) { + wd_alg_drv_unbind(ctx_config->ctxs[j].drv); + } + return -WD_EINVAL; }
-static struct uacce_dev_list *wd_get_usable_list(struct uacce_dev_list *list, struct bitmask *bmp) +struct uacce_dev_list *wd_get_usable_list(struct uacce_dev_list *list, struct bitmask *bmp) { struct uacce_dev_list *p, *node, *result = NULL; struct uacce_dev *dev; @@ -2464,7 +2626,7 @@ out_free_list: return result; }
-static int wd_init_ctx_set(struct wd_init_attrs *attrs, struct uacce_dev_list *list, +static int wd_init_hw_ctx_set(struct wd_init_attrs *attrs, struct uacce_dev_list *list, __u32 idx, int numa_id, __u32 op_type) { struct wd_ctx_nums ctx_nums = attrs->ctx_params->ctx_set_num[op_type]; @@ -2508,6 +2670,7 @@ static int wd_init_ctx_set(struct wd_init_attrs *attrs, struct uacce_dev_list *l ctx_config->ctxs[i].ctx_mode = ((i - idx) < ctx_nums.sync_ctx_num) ? CTX_MODE_SYNC : CTX_MODE_ASYNC; + ctx_config->ctxs[i].ctx_type = UADK_CTX_HW; }
return 0; @@ -2535,6 +2698,7 @@ static int wd_instance_sched_set(struct wd_sched *sched, struct wd_ctx_nums ctx_ sparams.type = op_type; sparams.mode = i; sparams.begin = idx + ctx_nums.sync_ctx_num * i; + sparams.ctx_prop = UADK_CTX_HW; end = idx - 1 + ctx_nums.sync_ctx_num + ctx_nums.async_ctx_num * i; if (end < 0 || sparams.begin > (__u32)end) continue; @@ -2549,7 +2713,7 @@ out: return ret; }
-static int wd_init_ctx_and_sched(struct wd_init_attrs *attrs, struct bitmask *bmp, +static int wd_hw_ctx_and_sched(struct wd_init_attrs *attrs, struct bitmask *bmp, struct uacce_dev_list *list) { struct wd_ctx_params *ctx_params = attrs->ctx_params; @@ -2563,7 +2727,7 @@ static int wd_init_ctx_and_sched(struct wd_init_attrs *attrs, struct bitmask *bm continue; for (j = 0; j < op_type_num; j++) { ctx_nums = ctx_params->ctx_set_num[j]; - ret = wd_init_ctx_set(attrs, list, idx, i, j); + ret = wd_init_hw_ctx_set(attrs, list, idx, i, j); if (ret) goto free_ctxs; ret = wd_instance_sched_set(attrs->sched, ctx_nums, idx, i, j); @@ -2592,30 +2756,118 @@ static void wd_init_device_nodemask(struct uacce_dev_list *list, struct bitmask } }
-static int wd_alg_ctx_init(struct wd_init_attrs *attrs) +static int wd_alg_other_ctx_init(struct wd_init_attrs *attrs, int ctx_prop) +{ + struct wd_ctx_config *ctx_config = attrs->ctx_config; + struct wd_ctx_params *ctx_params = attrs->ctx_params; + struct wd_ctx_nums *ptr_ctxs; + struct wd_soft_ctx *sfctx; + struct sched_params sparams; + __u32 begin, end, ctx_num; + int sync_type, ret; + __u32 i, j, k; + + WD_ERR("debug: call function: %s!\n", __func__); + for (i = 0; i < ctx_params->op_type_num; i++) { + ptr_ctxs = wd_get_ctx_ptr(ctx_params, i, ctx_prop); + if (!ptr_ctxs) + continue; + + for (sync_type = CTX_MODE_SYNC; sync_type < CTX_MODE_MAX; sync_type++) { + if (sync_type == CTX_MODE_SYNC) { + ctx_num = ptr_ctxs->sync_ctx_num; + begin = ptr_ctxs->ctx_begin; + } else { + ctx_num = ptr_ctxs->async_ctx_num; + begin = ptr_ctxs->ctx_begin + ptr_ctxs->sync_ctx_num; + } + if (ctx_num == 0) + continue; + + end = begin + ctx_num; + for (j = begin; j < end; j++) { + ctx_config->ctxs[j].op_type = i; + ctx_config->ctxs[j].ctx_mode = sync_type; + ctx_config->ctxs[j].ctx_type = ctx_prop; + sfctx = calloc(1, sizeof(struct wd_soft_ctx)); + if (!sfctx) { + WD_ERR("failed to alloc ctx!\n"); + goto ctx_err; + } + ctx_config->ctxs[j].ctx = (handle_t)sfctx; + pthread_spin_init(&sfctx->slock, PTHREAD_PROCESS_SHARED); + pthread_spin_init(&sfctx->rlock, PTHREAD_PROCESS_SHARED); + } + + memset(&sparams, 0x0, sizeof(struct sched_params)); + sparams.begin = begin; + sparams.end = end - 1; + sparams.mode = sync_type; + sparams.numa_id = 0; + sparams.ctx_prop = ctx_prop; + ret = wd_sched_rr_instance(attrs->sched, &sparams); + if (ret) { + WD_ERR("fail to instance scheduler.\n"); + goto ctx_err; + } + } + } + + return WD_SUCCESS; + +ctx_err: + for (k = j; k >= begin; k--) { + free((struct wd_soft_ctx *)ctx_config->ctxs[k].ctx); + ctx_config->ctxs[k].ctx = 0; + } + + return -WD_ENOMEM; +} + +static int wd_alg_other_init(struct wd_init_attrs *attrs) +{ + struct wd_ctx_config *ctx_config = attrs->ctx_config; + struct wd_ctx_params *ctx_params = attrs->ctx_params; + __u32 ctx_set_num, op_type_num; + + WD_ERR("debug: call function: %s!\n", __func__); + op_type_num = ctx_params->op_type_num; + ctx_set_num = wd_ctxs_idx_init(attrs, 1, attrs->task_type); + if (!ctx_set_num || !op_type_num) { + WD_ERR("invalid: ctx_set_num is %u, op_type_num is %u!\n", + ctx_set_num, op_type_num); + return -WD_EINVAL; + } + + ctx_config->ctx_num = ctx_set_num; + ctx_config->ctxs = calloc(ctx_config->ctx_num, sizeof(struct wd_ctx)); + if (!ctx_config->ctxs) { + WD_ERR("failed to alloc ctxs!\n"); + return -WD_ENOMEM; + } + + return 0; +} + +static int wd_alg_hw_ctx_init(struct wd_init_attrs *attrs) { struct wd_ctx_config *ctx_config = attrs->ctx_config; struct wd_ctx_params *ctx_params = attrs->ctx_params; struct bitmask *used_bmp = ctx_params->bmp; struct uacce_dev_list *list, *used_list = NULL; + char alg_type[CRYPTO_MAX_ALG_NAME]; __u32 ctx_set_num, op_type_num; int numa_cnt, ret;
- list = wd_get_accel_list(attrs->alg); + WD_ERR("debug: call function: %s!\n", __func__); + wd_get_alg_type(attrs->alg, alg_type); + + list = wd_get_accel_list(alg_type); if (!list) { WD_ERR("failed to get devices!\n"); return -WD_ENODEV; }
- op_type_num = ctx_params->op_type_num; - ctx_set_num = wd_get_ctx_numbers(*ctx_params, op_type_num); - if (!ctx_set_num || !op_type_num) { - WD_ERR("invalid: ctx_set_num is %u, op_type_num is %u!\n", - ctx_set_num, op_type_num); - ret = -WD_EINVAL; - goto out_freelist; - } - /* * Not every numa has a device. Therefore, the first thing is to * filter the devices in the selected numa node, and the second @@ -2629,7 +2881,6 @@ static int wd_alg_ctx_init(struct wd_init_attrs *attrs) }
wd_init_device_nodemask(used_list, used_bmp); - numa_cnt = numa_bitmask_weight(used_bmp); if (!numa_cnt) { ret = numa_cnt; @@ -2637,7 +2888,17 @@ static int wd_alg_ctx_init(struct wd_init_attrs *attrs) goto out_freeusedlist; }
- ctx_config->ctx_num = ctx_set_num * numa_cnt; + op_type_num = ctx_params->op_type_num; + ctx_set_num = wd_ctxs_idx_init(attrs, numa_cnt, attrs->task_type); + WD_ERR("ctx sum num is: %u, op_type num is: %u!\n", ctx_set_num, op_type_num); + if (!ctx_set_num || !op_type_num) { + WD_ERR("invalid: ctx_set_num is %u, op_type_num is %u!\n", + ctx_set_num, op_type_num); + ret = -WD_EINVAL; + goto out_freelist; + } + + ctx_config->ctx_num = ctx_set_num; ctx_config->ctxs = calloc(ctx_config->ctx_num, sizeof(struct wd_ctx)); if (!ctx_config->ctxs) { ret = -WD_ENOMEM; @@ -2645,226 +2906,154 @@ static int wd_alg_ctx_init(struct wd_init_attrs *attrs) goto out_freeusedlist; }
- ret = wd_init_ctx_and_sched(attrs, used_bmp, used_list); + ret = wd_hw_ctx_and_sched(attrs, used_bmp, used_list); if (ret) free(ctx_config->ctxs);
out_freeusedlist: wd_free_list_accels(used_list); + out_freelist: wd_free_list_accels(list);
return ret; }
-static int wd_alg_ce_ctx_init(struct wd_init_attrs *attrs) -{ - struct wd_ctx_config *ctx_config = attrs->ctx_config; - - ctx_config->ctx_num = 1; - ctx_config->ctxs = calloc(ctx_config->ctx_num, sizeof(struct wd_ctx)); - if (!ctx_config->ctxs) { - WD_ERR("failed to alloc ctxs!\n"); - return -WD_ENOMEM; - } - - ctx_config->ctxs[0].ctx = (handle_t)calloc(1, sizeof(struct wd_ce_ctx)); - if (!ctx_config->ctxs[0].ctx) { - free(ctx_config->ctxs); - return -WD_ENOMEM; - } - - return WD_SUCCESS; -} - -static void wd_alg_ce_ctx_uninit(struct wd_ctx_config *ctx_config) +static void wd_alg_ctxs_uninit(struct wd_ctx_config *ctx_config) { __u32 i;
for (i = 0; i < ctx_config->ctx_num; i++) { if (ctx_config->ctxs[i].ctx) { - free((struct wd_ce_ctx *)ctx_config->ctxs[i].ctx); + if (ctx_config->ctxs[i].ctx_type == UADK_CTX_HW) + wd_release_ctx(ctx_config->ctxs[i].ctx); + else + free((struct wd_soft_ctx *)ctx_config->ctxs[i].ctx); ctx_config->ctxs[i].ctx = 0; } }
- free(ctx_config->ctxs); -} - -static void wd_alg_ctx_uninit(struct wd_ctx_config *ctx_config) -{ - __u32 i; - - for (i = 0; i < ctx_config->ctx_num; i++) { - if (ctx_config->ctxs[i].ctx) { - wd_release_ctx(ctx_config->ctxs[i].ctx); - ctx_config->ctxs[i].ctx = 0; - } + if (ctx_config->ctxs) { + free(ctx_config->ctxs); + ctx_config->ctxs = 0; } - - free(ctx_config->ctxs); -} - -static int wd_alg_init_sve_ctx(struct wd_ctx_config *ctx_config) -{ - struct wd_soft_ctx *ctx_sync, *ctx_async; - - ctx_config->ctx_num = WD_SOFT_CTX_NUM; - ctx_config->ctxs = calloc(ctx_config->ctx_num, sizeof(struct wd_ctx)); - if (!ctx_config->ctxs) - return -WD_ENOMEM; - - ctx_sync = calloc(1, sizeof(struct wd_soft_ctx)); - if (!ctx_sync) - goto free_ctxs; - - ctx_config->ctxs[WD_SOFT_SYNC_CTX].op_type = 0; - ctx_config->ctxs[WD_SOFT_SYNC_CTX].ctx_mode = CTX_MODE_SYNC; - ctx_config->ctxs[WD_SOFT_SYNC_CTX].ctx = (handle_t)ctx_sync; - - ctx_async = calloc(1, sizeof(struct wd_soft_ctx)); - if (!ctx_async) - goto free_ctx_sync; - - ctx_config->ctxs[WD_SOFT_ASYNC_CTX].op_type = 0; - ctx_config->ctxs[WD_SOFT_ASYNC_CTX].ctx_mode = CTX_MODE_ASYNC; - ctx_config->ctxs[WD_SOFT_ASYNC_CTX].ctx = (handle_t)ctx_async; - - return 0; - -free_ctx_sync: - free(ctx_sync); -free_ctxs: - free(ctx_config->ctxs); - return -WD_ENOMEM; -} - -static void wd_alg_uninit_sve_ctx(struct wd_ctx_config *ctx_config) -{ - free((struct wd_soft_ctx *)ctx_config->ctxs[WD_SOFT_ASYNC_CTX].ctx); - free((struct wd_soft_ctx *)ctx_config->ctxs[WD_SOFT_SYNC_CTX].ctx); - free(ctx_config->ctxs); }
int wd_alg_attrs_init(struct wd_init_attrs *attrs) { wd_alg_poll_ctx alg_poll_func = attrs->alg_poll_ctx; wd_alg_init alg_init_func = attrs->alg_init; - __u32 sched_type = attrs->sched_type; struct wd_ctx_config *ctx_config = NULL; struct wd_sched *alg_sched = NULL; - char alg_type[CRYPTO_MAX_ALG_NAME]; - int driver_type = UADK_ALG_HW; - const char *alg = attrs->alg; - int ret = -WD_EINVAL; + struct wd_alg_driver *drv = NULL; + char *alg_name = attrs->alg; + __u32 op_type_num; + int ret = 0;
if (!attrs->ctx_params) return -WD_EINVAL;
- if (attrs->driver) - driver_type = attrs->driver->calc_type; - - switch (driver_type) { - case UADK_ALG_SOFT: - case UADK_ALG_CE_INSTR: - ctx_config = calloc(1, sizeof(*ctx_config)); - if (!ctx_config) { - WD_ERR("fail to alloc ctx config\n"); - return -WD_ENOMEM; - } - attrs->ctx_config = ctx_config; + WD_ERR("debug: call function: %s!\n", __func__); + ctx_config = calloc(1, sizeof(*ctx_config)); + if (!ctx_config) { + WD_ERR("fail to alloc ctx config\n"); + return -WD_ENOMEM; + } + attrs->ctx_config = ctx_config;
- /* Use default sched_type to alloc scheduler */ - alg_sched = wd_sched_rr_alloc(SCHED_POLICY_NONE, 1, 1, alg_poll_func); - if (!alg_sched) { - WD_ERR("fail to alloc scheduler\n"); - goto out_ctx_config; - } + /* Get op_type_num */ + op_type_num = attrs->ctx_params->op_type_num; + if (!op_type_num) + goto out_ctx_config;
- attrs->sched = alg_sched; + /* Use default sched_type to alloc scheduler */ + alg_sched = wd_sched_rr_alloc(attrs->sched_type, op_type_num, + numa_max_node() + 1, alg_poll_func); + if (!alg_sched) { + WD_ERR("fail to alloc scheduler\n"); + goto out_ctx_config; + } + attrs->sched = alg_sched;
- ret = wd_alg_ce_ctx_init(attrs); + /* Initialize queues according to task type */ + switch (attrs->task_type) { + case TASK_HW: + ret = wd_alg_hw_ctx_init(attrs); if (ret) { - WD_ERR("fail to init ce ctx\n"); + WD_ERR("fail to init HW ctx\n"); goto out_freesched; }
- ret = alg_init_func(ctx_config, alg_sched); - if (ret) - goto out_pre_init; - break; - case UADK_ALG_SVE_INSTR: - /* Use default sched_type to alloc scheduler */ - alg_sched = wd_sched_rr_alloc(SCHED_POLICY_SINGLE, 1, 1, alg_poll_func); - if (!alg_sched) { - WD_ERR("fail to alloc scheduler\n"); - return -WD_EINVAL; - } - attrs->sched = alg_sched; - - ctx_config = calloc(1, sizeof(*ctx_config)); - if (!ctx_config) { - WD_ERR("fail to alloc ctx config\n"); - goto out_freesched; - } - attrs->ctx_config = ctx_config; - - ret = wd_alg_init_sve_ctx(ctx_config); - if (ret) { - WD_ERR("fail to init sve ctx!\n"); - goto out_freesched; - } - - ctx_config->cap = attrs->ctx_params->cap; - ret = alg_init_func(ctx_config, alg_sched); + case TASK_MIX: + ret = wd_alg_hw_ctx_init(attrs); if (ret) { - wd_alg_uninit_sve_ctx(ctx_config); + WD_ERR("fail to init mix HW ctx\n"); goto out_freesched; } - break; - case UADK_ALG_HW: - wd_get_alg_type(alg, alg_type); - attrs->alg = alg_type; - - ctx_config = calloc(1, sizeof(*ctx_config)); - if (!ctx_config) { - WD_ERR("fail to alloc ctx config\n"); - return -WD_ENOMEM; - } - attrs->ctx_config = ctx_config;
- alg_sched = wd_sched_rr_alloc(sched_type, attrs->ctx_params->op_type_num, - numa_max_node() + 1, alg_poll_func); - if (!alg_sched) { - WD_ERR("fail to instance scheduler\n"); - goto out_ctx_config; + WD_ERR("debug: call function: %s!\n", __func__); + drv = wd_request_drv(alg_name, ALG_DRV_SOFT); + if (drv == NULL) { + WD_ERR("fail to find soft driver.\n"); + break; + } else if (drv->calc_type == UADK_ALG_CE_INSTR) { + ret = wd_alg_other_ctx_init(attrs, UADK_CTX_CE_INS); + if (ret) { + WD_ERR("fail to init ce ctx\n"); + goto out_ctx_init; + } + } else if (drv->calc_type == UADK_ALG_SVE_INSTR) { + ret = wd_alg_other_ctx_init(attrs, UADK_CTX_SVE_INS); + if (ret) { + WD_ERR("fail to init sve ctx\n"); + goto out_ctx_init; + } } - attrs->sched = alg_sched;
- ret = wd_alg_ctx_init(attrs); + break; + /* Only pure soft queues */ + case TASK_INSTR: + ret = wd_alg_other_init(attrs); if (ret) { - WD_ERR("fail to init ctx\n"); + WD_ERR("fail to init other ctx.\n"); goto out_freesched; }
- ctx_config->cap = attrs->ctx_params->cap; - ret = alg_init_func(ctx_config, alg_sched); - if (ret) - goto out_pre_init; + drv = wd_request_drv(alg_name, ALG_DRV_SOFT); + if (drv == NULL) { + WD_ERR("fail to find soft driver.\n"); + goto out_ctx_init; + } else if (drv->calc_type == UADK_ALG_CE_INSTR) { + ret = wd_alg_other_ctx_init(attrs, UADK_CTX_CE_INS); + if (ret) { + WD_ERR("fail to init ce ctx\n"); + goto out_ctx_init; + } + } else if (drv->calc_type == UADK_ALG_SVE_INSTR) { + ret = wd_alg_other_ctx_init(attrs, UADK_CTX_SVE_INS); + if (ret) { + WD_ERR("fail to init sve ctx\n"); + goto out_ctx_init; + } + } break; default: - WD_ERR("driver type error: %d\n", driver_type); + WD_ERR("driver type error: %d\n", drv->calc_type); return -WD_EINVAL; }
+ ctx_config->cap = attrs->ctx_params->cap; + ret = alg_init_func(ctx_config, alg_sched); + if (ret) + goto out_ctx_init; + + WD_ERR("---->ctx nums: %u\n", ctx_config->ctx_num); + return 0;
-out_pre_init: - if (driver_type == UADK_ALG_CE_INSTR || driver_type == UADK_ALG_SOFT) - wd_alg_ce_ctx_uninit(ctx_config); - else - wd_alg_ctx_uninit(ctx_config); +out_ctx_init: + wd_alg_ctxs_uninit(ctx_config); out_freesched: wd_sched_rr_release(alg_sched); out_ctx_config: @@ -2877,27 +3066,13 @@ void wd_alg_attrs_uninit(struct wd_init_attrs *attrs) { struct wd_ctx_config *ctx_config = attrs->ctx_config; struct wd_sched *alg_sched = attrs->sched; - int driver_type = attrs->driver->calc_type;
if (!ctx_config) { wd_sched_rr_release(alg_sched); return; }
- switch (driver_type) { - case UADK_ALG_SOFT: - case UADK_ALG_CE_INSTR: - wd_alg_ce_ctx_uninit(ctx_config); - break; - case UADK_ALG_SVE_INSTR: - wd_alg_uninit_sve_ctx(ctx_config); - break; - case UADK_ALG_HW: - wd_alg_ctx_uninit(ctx_config); - break; - default: - break; - } + wd_alg_ctxs_uninit(ctx_config);
free(ctx_config); wd_sched_rr_release(alg_sched);
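For illustration only (the demo_ names are placeholders and SCHED_POLICY_LOOP is introduced by the next patch), an algorithm module would now fill struct wd_init_attrs roughly like this, with task_type deciding which ctx types wd_alg_attrs_init() creates:

static int demo_alg_init_iface(struct wd_ctx_config *config, struct wd_sched *sched)
{
	return 0;	/* placeholder for the algorithm layer's init callback */
}

static int demo_alg_poll_ctx(__u32 idx, __u32 expect, __u32 *count)
{
	*count = 0;
	return 0;	/* placeholder for the algorithm layer's poll callback */
}

static int demo_alg_init2(struct wd_ctx_params *cparams)
{
	struct wd_init_attrs attrs = {0};
	static char alg[] = "aes-128-cbc";

	attrs.alg = alg;
	attrs.sched_type = SCHED_POLICY_LOOP;	/* added by the following scheduler patch */
	attrs.task_type = TASK_MIX;		/* HW ctxs plus CE/SVE soft ctxs */
	attrs.ctx_params = cparams;
	attrs.alg_init = demo_alg_init_iface;
	attrs.alg_poll_ctx = demo_alg_poll_ctx;

	return wd_alg_attrs_init(&attrs);
}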
After the uadk framework gains the heterogeneous scheduling function, the corresponding scheduler needs new scheduling policies (a usage sketch follows the diffstat below).
Signed-off-by: Longfang Liu liulongfang@huawei.com --- include/wd_sched.h | 6 + wd_sched.c | 974 ++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 928 insertions(+), 52 deletions(-)
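As a usage sketch only (the ctx index ranges and the op type are arbitrary, and poll_cb is whatever poll callback the algorithm layer already registers), the new loop policy could be wired up like this:

static struct wd_sched *demo_setup_loop_sched(user_poll_func poll_cb)
{
	struct sched_params sp = {0};
	struct wd_sched *sched;
	int ret;

	sched = wd_sched_rr_alloc(SCHED_POLICY_LOOP, 1, numa_max_node() + 1, poll_cb);
	if (!sched)
		return NULL;

	/* ctxs 0..3: hardware queues for op type 0, sync mode */
	sp.type = 0;
	sp.mode = CTX_MODE_SYNC;
	sp.numa_id = 0;
	sp.begin = 0;
	sp.end = 3;
	sp.ctx_prop = UADK_CTX_HW;
	ret = wd_sched_rr_instance(sched, &sp);
	if (ret)
		goto err_release;

	/* ctxs 4..5: CE-instruction soft queues for the same op type */
	sp.begin = 4;
	sp.end = 5;
	sp.ctx_prop = UADK_CTX_CE_INS;
	ret = wd_sched_rr_instance(sched, &sp);
	if (ret)
		goto err_release;

	return sched;

err_release:
	wd_sched_rr_release(sched);
	return NULL;
}

With SCHED_POLICY_LOOP, sync requests then alternate between the HW region and the CE region every LOOP_SWITH_TIME calls, while async requests are balanced on the outstanding hw_task_num/sw_task_num counters, as implemented in the diff below.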
diff --git a/include/wd_sched.h b/include/wd_sched.h index 5a69cb9..e86454c 100644 --- a/include/wd_sched.h +++ b/include/wd_sched.h @@ -23,6 +23,12 @@ enum sched_policy_type { SCHED_POLICY_NONE, /* requests will need a fixed ctx */ SCHED_POLICY_SINGLE, + /* Hard calculation and soft calculation interval loop call */ + SCHED_POLICY_LOOP, + /* Perform heterogeneous calculations through ctx of session key */ + SCHED_POLICY_HUNGRY, + /* Instructions to accelerate heterogeneous computing */ + SCHED_POLICY_INSTR, SCHED_POLICY_BUTT, };
diff --git a/wd_sched.c b/wd_sched.c index aa6c91e..ae2d475 100644 --- a/wd_sched.c +++ b/wd_sched.c @@ -19,6 +19,14 @@ enum sched_region_mode { SCHED_MODE_BUTT };
+struct wd_sched_balancer { + int switch_slice; + __u32 hw_task_num; + __u32 sw_task_num; + __u32 hw_dfx_num; + __u32 sw_dfx_num; +}; + /* * sched_key - The key if schedule region. * @numa_id: The schedule numa region id. @@ -31,9 +39,12 @@ struct sched_key { int numa_id; __u8 type; __u8 mode; - __u32 sync_ctxid; - __u32 async_ctxid; + __u8 ctx_prop; + __u32 sync_ctxid[UADK_CTX_MAX]; + __u32 async_ctxid[UADK_CTX_MAX]; + struct wd_sched_balancer balancer; }; +#define LOOP_SWITH_TIME 5
/* * struct sched_ctx_range - define one ctx pos. @@ -58,11 +69,16 @@ struct sched_ctx_region { * two(e.g. comp and uncomp) the map[x][y]'s value is the ctx * begin and end pos. * @valid: the region used flag. + * @region_type: the region's property + * @next_info: next scheduling domain */ struct wd_sched_info { - struct sched_ctx_region *ctx_region[SCHED_MODE_BUTT]; + struct sched_ctx_region *ctx_region[SCHED_MODE_BUTT]; // default as HW ctxs + int region_type; bool valid; + struct wd_sched_info *next_info; }; +#define MAX_SKEY_REGION_NUM 64
/* * wd_sched_ctx - define the context of the scheduler. @@ -79,9 +95,78 @@ struct wd_sched_ctx { __u16 numa_num; user_poll_func poll_func; int numa_map[NUMA_NUM_NODES]; - struct wd_sched_info sched_info[0]; + + __u32 skey_num; + pthread_mutex_t skey_lock; + struct sched_key *skey[MAX_SKEY_REGION_NUM]; // supports up to 64 threads region + __u32 poll_tid[MAX_SKEY_REGION_NUM]; + + struct wd_sched_balancer balancer; + struct wd_sched_info sched_info[0]; // It's an block memory based on numa nodes };
+#define nop() asm volatile("nop") +static void delay_us(int ustime) +{ + int cycle = 2600; // for 2.6GHz CPU + int i, j; + + for (i = 0; i < ustime; i++) { + for (j = 0; j < cycle; j++) + nop(); + } + usleep(1); +} + +static void sched_skey_param_init(struct wd_sched_ctx *sched_ctx, struct sched_key *skey) +{ + __u32 i; + + pthread_mutex_lock(&sched_ctx->skey_lock); + for (i = 0; i < MAX_SKEY_REGION_NUM; i++) { + if (sched_ctx->skey[i] == NULL) { + sched_ctx->skey[i] = skey; + sched_ctx->skey_num++; + pthread_mutex_unlock(&sched_ctx->skey_lock); + WD_ERR("success: get valid skey node[%u]!\n", i); + return; + } + } + pthread_mutex_unlock(&sched_ctx->skey_lock); + WD_ERR("invalid: skey node number is too much!\n"); +} + +static struct sched_key *sched_get_poll_skey(struct wd_sched_ctx *sched_ctx) +{ + __u32 tid = pthread_self(); + __u16 i, tidx = 0; + + /* Delay processing within 17us is performed */ + delay_us(tid % 17); + /* Set mapping relationship between the recv tid and the send skey id */ + for (i = 0; i < sched_ctx->skey_num; i++) { + if (sched_ctx->poll_tid[i] == tid) { + //WD_ERR("poll tid ---> skey id:<%u, %u>!\n", i, tid); + tidx = i; + break; + } else if (sched_ctx->poll_tid[i] == 0) { + pthread_mutex_lock(&sched_ctx->skey_lock); + if (sched_ctx->poll_tid[i] == 0) { + //WD_ERR("poll tid<%u> <---> skey id:<%u>!\n", i, tid); + sched_ctx->poll_tid[i] = tid; + tidx = i; + } else { + pthread_mutex_unlock(&sched_ctx->skey_lock); + return NULL; + } + pthread_mutex_unlock(&sched_ctx->skey_lock); + break; + } + } + + return sched_ctx->skey[tidx]; +} + static bool sched_key_valid(struct wd_sched_ctx *sched_ctx, const struct sched_key *key) { if (key->numa_id >= sched_ctx->numa_num || key->mode >= SCHED_MODE_BUTT || @@ -201,14 +286,15 @@ static handle_t session_sched_init(handle_t h_sched_ctx, void *sched_param) skey->numa_id = param->numa_id; }
- if (skey->numa_id < 0) { - WD_ERR("failed to get valid sched numa region!\n"); - goto out; - } + //if (skey->numa_id < 0) { + // WD_ERR("failed to get valid sched numa region!\n"); + // goto out; + //} + skey->numa_id = 0;
- skey->sync_ctxid = session_sched_init_ctx(sched_ctx, skey, CTX_MODE_SYNC); - skey->async_ctxid = session_sched_init_ctx(sched_ctx, skey, CTX_MODE_ASYNC); - if (skey->sync_ctxid == INVALID_POS && skey->async_ctxid == INVALID_POS) { + skey->sync_ctxid[0] = session_sched_init_ctx(sched_ctx, skey, CTX_MODE_SYNC); + skey->async_ctxid[0] = session_sched_init_ctx(sched_ctx, skey, CTX_MODE_ASYNC); + if (skey->sync_ctxid[0] == INVALID_POS && skey->async_ctxid[0] == INVALID_POS) { WD_ERR("failed to get valid sync_ctxid or async_ctxid!\n"); goto out; } @@ -240,8 +326,8 @@ static __u32 session_sched_pick_next_ctx(handle_t h_sched_ctx, void *sched_key,
/* return in do task */ if (sched_mode == CTX_MODE_SYNC) - return key->sync_ctxid; - return key->async_ctxid; + return key->sync_ctxid[0]; + return key->async_ctxid[0]; }
static int session_poll_region(struct wd_sched_ctx *sched_ctx, __u32 begin, @@ -444,6 +530,669 @@ static int sched_single_poll_policy(handle_t h_sched_ctx, return 0; }
+static struct sched_ctx_region *loop_get_near_region( + struct wd_sched_ctx *sched_ctx, const struct sched_key *key) +{ + struct wd_sched_info *sched_info, *demon_info; + int numa_id; + + /* If the key->numa_id is not exist, we should scan for a valid region */ + for (numa_id = 0; numa_id < sched_ctx->numa_num; numa_id++) { + sched_info = sched_ctx->sched_info + numa_id; + if (sched_info->valid) { + demon_info = sched_info; + while (demon_info) { + if (demon_info->valid) + return &demon_info->ctx_region[key->mode][key->type]; + demon_info = demon_info->next_info; + } + } + } + + return NULL; +} + +/* + * loop_get_ctx_range - Get ctx range from ctx_map by the wd comp arg + */ +static struct sched_ctx_region *loop_get_ctx_range( + struct wd_sched_ctx *sched_ctx, const struct sched_key *key) +{ + struct wd_sched_info *sched_region, *sched_info; + int ctx_prop = key->ctx_prop; + + if (key->numa_id < 0) + return loop_get_near_region(sched_ctx, key); + + sched_region = sched_ctx->sched_info; + sched_info = sched_region + key->numa_id; + while (sched_info) { + if (sched_info->valid && ctx_prop == sched_info->region_type && + sched_info->ctx_region[key->mode][key->type].valid) + return &sched_info->ctx_region[key->mode][key->type]; + sched_info = sched_info->next_info; + } + + WD_ERR("failed to get valid sched region!\n"); + return NULL; +} + +/* + * loop_sched_init_ctx - Get one ctx from ctxs by the sched_ctx and arg. + * @sched_ctx: Schedule ctx, reference the struct sample_sched_ctx. + * @sched_key: The key of schedule region. + * @sched_mode: The sched async/sync mode. + * + * The user must init the schedule info through wd_sched_rr_instance + */ +static __u32 loop_sched_init_ctx(struct wd_sched_ctx *sched_ctx, + struct sched_key *key, const int sched_mode) +{ + struct sched_ctx_region *region = NULL; + bool ret; + + key->mode = sched_mode; + ret = sched_key_valid(sched_ctx, key); + if (!ret) + return INVALID_POS; + + region = loop_get_ctx_range(sched_ctx, key); + if (!region) + return INVALID_POS; + + return sched_get_next_pos_rr(region, NULL); +} + +static handle_t loop_sched_init(handle_t h_sched_ctx, void *sched_param) +{ + struct wd_sched_ctx *sched_ctx = (struct wd_sched_ctx *)h_sched_ctx; + struct sched_params *param = (struct sched_params *)sched_param; + int cpu = sched_getcpu(); + int node = numa_node_of_cpu(cpu); + struct sched_key *skey; + int ctx_prop; + + if (node < 0) { + WD_ERR("invalid: failed to get numa node!\n"); + return (handle_t)(-WD_EINVAL); + } + + if (!sched_ctx) { + WD_ERR("invalid: sched ctx is NULL!\n"); + return (handle_t)(-WD_EINVAL); + } + + skey = malloc(sizeof(struct sched_key)); + if (!skey) { + WD_ERR("failed to alloc memory for session sched key!\n"); + return (handle_t)(-WD_ENOMEM); + } + + if (!param) { + memset(skey, 0, sizeof(struct sched_key)); + //skey->numa_id = sched_ctx->numa_map[node]; + skey->numa_id = 0; + skey->ctx_prop = UADK_CTX_HW; + WD_INFO("loop don't set scheduler parameters!\n"); + } else if (param->numa_id < 0) { + skey->type = param->type; + //skey->numa_id = sched_ctx->numa_map[node]; + skey->numa_id = 0; + skey->ctx_prop = param->ctx_prop; + } else { + skey->type = param->type; + skey->numa_id = param->numa_id; + skey->ctx_prop = param->ctx_prop; + } + + //if (skey->numa_id < 0) { + // WD_ERR("failed to get valid sched numa region!\n"); + // goto out; + //} + skey->numa_id = 0; + + memset(&skey->sync_ctxid, INVALID_POS, sizeof(__u32) * UADK_CTX_MAX); + memset(&skey->async_ctxid, INVALID_POS, sizeof(__u32) * UADK_CTX_MAX); + 
skey->sync_ctxid[0] = loop_sched_init_ctx(sched_ctx, skey, CTX_MODE_SYNC); + skey->async_ctxid[0] = loop_sched_init_ctx(sched_ctx, skey, CTX_MODE_ASYNC); + if (skey->sync_ctxid[0] == INVALID_POS && skey->async_ctxid[0] == INVALID_POS) { + WD_ERR("failed to get valid sync_ctxid or async_ctxid!\n"); + goto out; + } + WD_ERR("sync_ctxid is: %u; async_ctxid is: %u!\n", skey->sync_ctxid[0], skey->async_ctxid[0]); + ctx_prop = skey->ctx_prop; + skey->ctx_prop = UADK_CTX_CE_INS; + skey->sync_ctxid[UADK_CTX_CE_INS] = loop_sched_init_ctx(sched_ctx, skey, CTX_MODE_SYNC); + skey->async_ctxid[UADK_CTX_CE_INS] = loop_sched_init_ctx(sched_ctx, skey, CTX_MODE_ASYNC); + skey->ctx_prop = ctx_prop; + if (skey->sync_ctxid[1] == INVALID_POS && skey->async_ctxid[1] == INVALID_POS) { + WD_ERR("failed to get valid CE sync_ctxid or async_ctxid!\n"); + skey->sync_ctxid[1] = skey->sync_ctxid[0]; + skey->async_ctxid[1] = skey->async_ctxid[0]; + } + + WD_ERR("sw ctxid is: %u, %u!\n", skey->sync_ctxid[1], skey->async_ctxid[1]); + + return (handle_t)skey; + +out: + free(skey); + return (handle_t)(-WD_EINVAL); +} + +/* + * loop_sched_pick_next_ctx - Get one ctx from ctxs by the sched_ctx and arg. + * @sched_ctx: Schedule ctx, reference the struct sample_sched_ctx. + * @sched_key: The key of schedule region. + * @sched_mode: The sched async/sync mode. + * + * The user must init the schedule info through session_sched_init + */ +static __u32 loop_sched_pick_next_ctx(handle_t h_sched_ctx, void *sched_key, + const int sched_mode) +{ + struct wd_sched_ctx *sched_ctx = (struct wd_sched_ctx *)h_sched_ctx; + struct sched_key *key = (struct sched_key *)sched_key; + struct wd_sched_balancer *balancer = &sched_ctx->balancer; + + if (unlikely(!h_sched_ctx || !key)) { + WD_ERR("invalid: sched ctx or key is NULL!\n"); + return INVALID_POS; + } + + if (key->sync_ctxid[UADK_CTX_HW] == INVALID_POS || + key->async_ctxid[UADK_CTX_HW] == INVALID_POS) + return session_sched_pick_next_ctx(h_sched_ctx, sched_key, sched_mode); + + if (sched_mode == CTX_MODE_SYNC) { + if (balancer->switch_slice == LOOP_SWITH_TIME) { + balancer->switch_slice = 0; + balancer->hw_dfx_num++; + /* run in HW */ + return key->sync_ctxid[UADK_CTX_HW]; + } else { + balancer->switch_slice++; + /* run in soft CE */ + balancer->sw_dfx_num++; + return key->sync_ctxid[UADK_CTX_CE_INS]; + } + } + // Async mode + if (balancer->hw_task_num > balancer->sw_task_num >> 1) { + /* run in soft CE */ + balancer->sw_task_num++; + + return key->async_ctxid[UADK_CTX_CE_INS]; + } else { + /* run in HW */ + balancer->hw_task_num++; + + return key->async_ctxid[UADK_CTX_HW]; + } +} + +static int loop_poll_policy_rr(struct wd_sched_ctx *sched_ctx, int numa_id, + __u32 expect, __u32 *count) +{ + struct wd_sched_balancer *balancer = &sched_ctx->balancer; + struct wd_sched_info *sched_info, *cur_info, *pnext_info; + struct sched_ctx_region **region; + __u32 begin, end; + __u32 i; + int ret; + + sched_info = sched_ctx->sched_info; + cur_info = sched_info + numa_id; + pnext_info = cur_info; + while (pnext_info) { + if (!pnext_info->valid) { + pnext_info = pnext_info->next_info; + continue; + } + + region = pnext_info->ctx_region; + for (i = 0; i < sched_ctx->type_num; i++) { + if (!region[SCHED_MODE_ASYNC][i].valid) + continue; + + begin = region[SCHED_MODE_ASYNC][i].begin; + end = region[SCHED_MODE_ASYNC][i].end; + // WD_ERR("session_poll_policy_rr numa: %d, from %u ---> %u!\n", numa_id, begin, end); + ret = session_poll_region(sched_ctx, begin, end, expect, count); + if (unlikely(ret)) + return 
ret; + } + + /* run in HW */ + if (pnext_info->region_type == UADK_CTX_HW) { + if (balancer->hw_task_num > *count) + balancer->hw_task_num -= *count; + else + balancer->hw_task_num = 0; + balancer->hw_dfx_num += *count; + } else { + if (balancer->sw_task_num > *count) + balancer->sw_task_num -= *count; + else + balancer->sw_task_num = 0; + balancer->sw_dfx_num += *count; + } + + pnext_info = pnext_info->next_info; + } + + return 0; +} + +/* + * loop_poll_policy - The polling policy matches the pick next ctx. + * @sched_ctx: Schedule ctx, reference the struct sample_sched_ctx. + * @cfg: The global resoure info. + * @expect: User expect poll msg num. + * @count: The actually poll num. + * + * The user must init the schedule info through wd_sched_rr_instance, the + * func interval will not check the valid, becouse it will affect performance. + */ +static int loop_sched_poll_policy(handle_t h_sched_ctx, __u32 expect, __u32 *count) +{ + struct wd_sched_ctx *sched_ctx = (struct wd_sched_ctx *)h_sched_ctx; + struct wd_sched_info *sched_info; + __u32 loop_time = 0; + __u32 last_count = 0; + __u16 i; + int ret; + + if (unlikely(!count || !sched_ctx || !sched_ctx->poll_func)) { + WD_ERR("invalid: sched ctx or poll_func is NULL or count is zero!\n"); + return -WD_EINVAL; + } + + if (unlikely(sched_ctx->numa_num > NUMA_NUM_NODES)) { + WD_ERR("invalid: ctx's numa number is %u!\n", sched_ctx->numa_num); + return -WD_EINVAL; + } + + sched_info = sched_ctx->sched_info; + + /* + * Try different numa's ctx if we can't receive any + * package last time, it is more efficient. In most + * bad situation, poll ends after MAX_POLL_TIMES loop. + */ + while (++loop_time < MAX_POLL_TIMES) { + for (i = 0; i < sched_ctx->numa_num;) { + /* If current numa is not valid, find next. */ + if (!sched_info[i].valid) { + i++; + continue; + } + + last_count = *count; + ret = loop_poll_policy_rr(sched_ctx, i, expect, count); + if (unlikely(ret)) + return ret; + + if (expect == *count) + return 0; + + /* + * If no package is received, find next numa, + * otherwise, keep receiving packets at this node. 
+ */ + if (last_count == *count) + i++; + } + } + + return 0; +} + +static handle_t skey_sched_init(handle_t h_sched_ctx, void *sched_param) +{ + struct wd_sched_ctx *sched_ctx = (struct wd_sched_ctx *)h_sched_ctx; + struct sched_params *param = (struct sched_params *)sched_param; + int cpu = sched_getcpu(); + int node = numa_node_of_cpu(cpu); + struct sched_key *skey; + int ctx_prop; + + if (node < 0) { + WD_ERR("invalid: failed to get numa node!\n"); + return (handle_t)(-WD_EINVAL); + } + + if (!sched_ctx) { + WD_ERR("invalid: sched ctx is NULL!\n"); + return (handle_t)(-WD_EINVAL); + } + + skey = malloc(sizeof(struct sched_key)); + if (!skey) { + WD_ERR("failed to alloc memory for session sched key!\n"); + return (handle_t)(-WD_ENOMEM); + } + + if (!param) { + memset(skey, 0, sizeof(struct sched_key)); + //skey->numa_id = sched_ctx->numa_map[node]; + skey->numa_id = 0; + skey->ctx_prop = UADK_CTX_HW; + WD_INFO("loop don't set scheduler parameters!\n"); + } else if (param->numa_id < 0) { + skey->type = param->type; + //skey->numa_id = sched_ctx->numa_map[node]; + skey->numa_id = 0; + skey->ctx_prop = param->ctx_prop; + } else { + skey->type = param->type; + skey->numa_id = param->numa_id; + skey->ctx_prop = param->ctx_prop; + } + + //if (skey->numa_id < 0) { + // WD_ERR("failed to get valid sched numa region!\n"); + // goto out; + //} + memset(&skey->balancer, 0x0, sizeof(struct wd_sched_balancer)); + skey->numa_id = 0; + + memset(&skey->sync_ctxid, INVALID_POS, sizeof(__u32) * UADK_CTX_MAX); + memset(&skey->async_ctxid, INVALID_POS, sizeof(__u32) * UADK_CTX_MAX); + skey->sync_ctxid[0] = loop_sched_init_ctx(sched_ctx, skey, CTX_MODE_SYNC); + skey->async_ctxid[0] = loop_sched_init_ctx(sched_ctx, skey, CTX_MODE_ASYNC); + if (skey->sync_ctxid[0] == INVALID_POS && skey->async_ctxid[0] == INVALID_POS) { + WD_ERR("failed to get valid sync_ctxid or async_ctxid!\n"); + goto out; + } + WD_ERR("sync_ctxid is: %u; async_ctxid is: %u!\n", skey->sync_ctxid[0], skey->async_ctxid[0]); + ctx_prop = skey->ctx_prop; + skey->ctx_prop = UADK_CTX_CE_INS; + skey->sync_ctxid[UADK_CTX_CE_INS] = loop_sched_init_ctx(sched_ctx, skey, CTX_MODE_SYNC); + skey->async_ctxid[UADK_CTX_CE_INS] = loop_sched_init_ctx(sched_ctx, skey, CTX_MODE_ASYNC); + skey->ctx_prop = ctx_prop; + if (skey->sync_ctxid[1] == INVALID_POS && skey->async_ctxid[1] == INVALID_POS) { + WD_ERR("failed to get valid CE sync_ctxid or async_ctxid!\n"); + skey->sync_ctxid[1] = skey->sync_ctxid[0]; + skey->async_ctxid[1] = skey->async_ctxid[0]; + } + + sched_skey_param_init(sched_ctx, skey); + WD_ERR("sw ctxid is: %u, %u!\n", skey->sync_ctxid[1], skey->async_ctxid[1]); + + return (handle_t)skey; + +out: + free(skey); + return (handle_t)(-WD_EINVAL); +} + +/* + * loop_sched_pick_next_ctx - Get one ctx from ctxs by the sched_ctx and arg. + * @sched_ctx: Schedule ctx, reference the struct sample_sched_ctx. + * @sched_key: The key of schedule region. + * @sched_mode: The sched async/sync mode. 
+ * + * The user must init the schedule info through session_sched_init + */ +static __u32 skey_sched_pick_next_ctx(handle_t h_sched_ctx, void *sched_key, + const int sched_mode) +{ + struct sched_key *skey = (struct sched_key *)sched_key; + + if (unlikely(!h_sched_ctx || !skey)) { + WD_ERR("invalid: sched ctx or key is NULL!\n"); + return INVALID_POS; + } + + if (skey->sync_ctxid[UADK_CTX_HW] == INVALID_POS || + skey->async_ctxid[UADK_CTX_HW] == INVALID_POS) + return session_sched_pick_next_ctx(h_sched_ctx, sched_key, sched_mode); + + // Async mode + if (sched_mode == CTX_MODE_ASYNC) { + if (skey->balancer.hw_task_num > (skey->balancer.sw_task_num >> 1)) { + /* run in soft CE */ + skey->balancer.sw_task_num++; + return skey->async_ctxid[UADK_CTX_CE_INS]; + } else { + /* run in HW */ + skey->balancer.hw_task_num++; + return skey->async_ctxid[UADK_CTX_HW]; + } + } + + if (skey->balancer.switch_slice == LOOP_SWITH_TIME) { + skey->balancer.switch_slice = 0; + skey->balancer.hw_dfx_num++; + /* run in HW */ + return skey->sync_ctxid[UADK_CTX_HW]; + } else { + skey->balancer.switch_slice++; + skey->balancer.sw_dfx_num++; + /* run in soft CE */ + return skey->sync_ctxid[UADK_CTX_CE_INS]; + } +} + +static int skey_poll_ctx(struct wd_sched_ctx *sched_ctx, struct sched_key *skey, + __u32 expect, __u32 *count) +{ + __u32 hw_num = 0; + __u32 sw_num = 0; + __u32 poll_num; + int i, ret; + + /* + * Collect hardware messages first, multi-threading performance is better; + * Collect software packets first, single-thread performance is better + */ + for (i = UADK_CTX_MAX - 1; i >= 0; i--) { + if (skey->async_ctxid[i] == INVALID_POS) + continue; + + poll_num = 0; + ret = sched_ctx->poll_func(skey->async_ctxid[i], expect, &poll_num); + if ((ret < 0) && (ret != -EAGAIN)) + return ret; + else if (poll_num == 0) + continue; + + if (i == 0) + hw_num += poll_num; + else + sw_num += poll_num; + } + + *count = *count + hw_num + sw_num; + if (hw_num > 0) { + if (skey->balancer.hw_task_num > hw_num) + skey->balancer.hw_task_num -= hw_num; + else + skey->balancer.hw_task_num = 0; + skey->balancer.hw_dfx_num += hw_num; + } + if (sw_num > 0) { + if (skey->balancer.sw_task_num > sw_num) + skey->balancer.sw_task_num -= sw_num; + else + skey->balancer.sw_task_num = 0; + skey->balancer.sw_dfx_num += sw_num; + } + + return 0; +} + +/* + * loop_poll_policy - The polling policy matches the pick next ctx. + * @sched_ctx: Schedule ctx, reference the struct sample_sched_ctx. + * @cfg: The global resoure info. + * @expect: User expect poll msg num. + * @count: The actually poll num. + * + * The user must init the schedule info through wd_sched_rr_instance, the + * func interval will not check the valid, becouse it will affect performance. 
+ */ +static int skey_sched_poll_policy(handle_t h_sched_ctx, __u32 expect, __u32 *count) +{ + struct wd_sched_ctx *sched_ctx = (struct wd_sched_ctx *)h_sched_ctx; + struct sched_key *skey; + int ret; + + if (unlikely(!count || !sched_ctx || !sched_ctx->poll_func)) { + WD_ERR("invalid: sched ctx or poll_func is NULL or count is zero!\n"); + return -WD_EINVAL; + } + + skey = sched_get_poll_skey(sched_ctx); + if (!skey) + return -WD_EAGAIN; + + ret = skey_poll_ctx(sched_ctx, skey, expect, count); + if (unlikely(ret)) + return ret; + + return 0; +} + +static handle_t instr_sched_init(handle_t h_sched_ctx, void *sched_param) +{ + struct wd_sched_ctx *sched_ctx = (struct wd_sched_ctx *)h_sched_ctx; + struct sched_params *param = (struct sched_params *)sched_param; + int cpu = sched_getcpu(); + int node = numa_node_of_cpu(cpu); + struct sched_key *skey; + + if (node < 0) { + WD_ERR("invalid: failed to get numa node!\n"); + return (handle_t)(-WD_EINVAL); + } + + if (!sched_ctx) { + WD_ERR("invalid: sched ctx is NULL!\n"); + return (handle_t)(-WD_EINVAL); + } + + skey = malloc(sizeof(struct sched_key)); + if (!skey) { + WD_ERR("failed to alloc memory for session sched key!\n"); + return (handle_t)(-WD_ENOMEM); + } + + if (!param) { + memset(skey, 0, sizeof(struct sched_key)); + //skey->numa_id = sched_ctx->numa_map[node]; + skey->numa_id = 0; + skey->ctx_prop = UADK_CTX_CE_INS; + WD_INFO("loop don't set scheduler parameters!\n"); + } else if (param->numa_id < 0) { + skey->type = param->type; + //skey->numa_id = sched_ctx->numa_map[node]; + skey->numa_id = 0; + skey->ctx_prop = param->ctx_prop; + } else { + skey->type = param->type; + skey->numa_id = param->numa_id; + skey->ctx_prop = param->ctx_prop; + } + + //if (skey->numa_id < 0) { + // WD_ERR("failed to get valid sched numa region!\n"); + // goto out; + //} + skey->numa_id = 0; + + memset(&skey->sync_ctxid, INVALID_POS, sizeof(__u32) * UADK_CTX_MAX); + memset(&skey->async_ctxid, INVALID_POS, sizeof(__u32) * UADK_CTX_MAX); + skey->sync_ctxid[UADK_CTX_CE_INS] = loop_sched_init_ctx(sched_ctx, skey, CTX_MODE_SYNC); + skey->async_ctxid[UADK_CTX_CE_INS] = loop_sched_init_ctx(sched_ctx, skey, CTX_MODE_ASYNC); + + sched_skey_param_init(sched_ctx, skey); + WD_ERR("sw ctxid is: %u, %u!\n", skey->sync_ctxid[1], skey->async_ctxid[1]); + + return (handle_t)skey; +} + +/* + * loop_sched_pick_next_ctx - Get one ctx from ctxs by the sched_ctx and arg. + * @sched_ctx: Schedule ctx, reference the struct sample_sched_ctx. + * @sched_key: The key of schedule region. + * @sched_mode: The sched async/sync mode. 
+ * + * The user must init the schedule info through session_sched_init + */ +static __u32 instr_sched_pick_next_ctx(handle_t h_sched_ctx, void *sched_key, + const int sched_mode) +{ + struct sched_key *key = (struct sched_key *)sched_key; + + //if (unlikely(!h_sched_ctx || !key)) { + // WD_ERR("invalid: sched ctx or key is NULL!\n"); + // return INVALID_POS; + //} + + key->balancer.sw_dfx_num++; + if (sched_mode == CTX_MODE_SYNC) { + /* run in soft CE */ + return key->sync_ctxid[UADK_CTX_CE_INS]; + } + // Async mode + /* run in soft CE */ + return key->async_ctxid[UADK_CTX_CE_INS]; +} + +static int instr_poll_policy_rr(struct wd_sched_ctx *sched_ctx, struct sched_key *skey, + __u32 expect, __u32 *count) +{ + __u32 recv_cnt, ctx_id; + int ret; + + //WD_ERR("success: sched skey num: %u!\n", i); + recv_cnt = 0; + ctx_id = skey->async_ctxid[UADK_CTX_CE_INS]; + ret = sched_ctx->poll_func(ctx_id, expect, &recv_cnt); + if ((ret < 0) && (ret != -EAGAIN)) + return ret; + *count += recv_cnt; + //WD_ERR("success: sched recv task num: %u!\n", *count); + + return 0; +} + +/* + * loop_poll_policy - The polling policy matches the pick next ctx. + * @sched_ctx: Schedule ctx, reference the struct sample_sched_ctx. + * @cfg: The global resoure info. + * @expect: User expect poll msg num. + * @count: The actually poll num. + * + * The user must init the schedule info through wd_sched_rr_instance, the + * func interval will not check the valid, becouse it will affect performance. + */ +static int instr_sched_poll_policy(handle_t h_sched_ctx, __u32 expect, __u32 *count) +{ + struct wd_sched_ctx *sched_ctx = (struct wd_sched_ctx *)h_sched_ctx; + struct sched_key *skey; + int ret; + + if (unlikely(!count || !sched_ctx || !sched_ctx->poll_func)) { + WD_ERR("invalid: sched ctx or poll_func is NULL or count is zero!\n"); + return -WD_EINVAL; + } + + /* First poll the skey is NULL */ + skey = sched_get_poll_skey(sched_ctx); + if (!skey) + return -WD_EAGAIN; + + ret = instr_poll_policy_rr(sched_ctx, skey, expect, count); + if (unlikely(ret)) + return ret; + + return ret; +} + + static struct wd_sched sched_table[SCHED_POLICY_BUTT] = { { .name = "RR scheduler", @@ -463,7 +1212,25 @@ static struct wd_sched sched_table[SCHED_POLICY_BUTT] = { .sched_init = sched_single_init, .pick_next_ctx = sched_single_pick_next_ctx, .poll_policy = sched_single_poll_policy, - } + }, { + .name = "Loop scheduler", + .sched_policy = SCHED_POLICY_LOOP, + .sched_init = loop_sched_init, + .pick_next_ctx = loop_sched_pick_next_ctx, + .poll_policy = loop_sched_poll_policy, + }, { + .name = "Hungry scheduler", + .sched_policy = SCHED_POLICY_HUNGRY, + .sched_init = skey_sched_init, + .pick_next_ctx = skey_sched_pick_next_ctx, + .poll_policy = skey_sched_poll_policy, + }, { + .name = "Instr scheduler", + .sched_policy = SCHED_POLICY_INSTR, + .sched_init = instr_sched_init, + .pick_next_ctx = instr_sched_pick_next_ctx, + .poll_policy = instr_sched_poll_policy, + }, };
static int wd_sched_get_nearby_numa_id(struct wd_sched_info *sched_info, int node, int numa_num) @@ -499,12 +1266,40 @@ static void wd_sched_map_cpus_to_dev(struct wd_sched_ctx *sched_ctx) } }
-int wd_sched_rr_instance(const struct wd_sched *sched, struct sched_params *param) +static int wd_sched_region_instance(struct wd_sched_info *sched_info, + struct sched_params *param) +{ + struct wd_sched_info *next_info; + __u8 type, mode; + + type = param->type; + mode = param->mode; + next_info = sched_info; + while (next_info) { + if (next_info->region_type == param->ctx_prop) { + next_info->ctx_region[mode][type].begin = param->begin; + next_info->ctx_region[mode][type].end = param->end; + next_info->ctx_region[mode][type].last = param->begin; + next_info->ctx_region[mode][type].valid = true; + next_info->valid = true; + pthread_mutex_init(&next_info->ctx_region[mode][type].lock, NULL); + WD_ERR("instance numa<%d>, property<%d>, mode<%u>, type<%u> ctx: begin: %u ----> end: %u!\n", + param->numa_id, param->ctx_prop, mode, type, param->begin, param->end); + return 0; + } + next_info = next_info->next_info; + } + + return -WD_EINVAL; +} + +int wd_sched_rr_instance(const struct wd_sched *sched, + struct sched_params *param) { struct wd_sched_info *sched_info = NULL; struct wd_sched_ctx *sched_ctx = NULL; __u8 type, mode; - int numa_id; + int numa_id, ret;
if (!sched || !sched->h_sched_ctx || !param) { WD_ERR("invalid: sched or sched_params is NULL!\n"); @@ -539,33 +1334,63 @@ int wd_sched_rr_instance(const struct wd_sched *sched, struct sched_params *para return -WD_EINVAL; }
- sched_info = sched_ctx->sched_info; + if (param->ctx_prop > UADK_CTX_SOFT) { + WD_ERR("invalid: sched_ctx's prop is %d\n", param->ctx_prop); + return -WD_EINVAL; + }
+ sched_info = sched_ctx->sched_info; if (!sched_info[numa_id].ctx_region[mode]) { WD_ERR("invalid: ctx_region is NULL, numa: %d, mode: %u!\n", numa_id, mode); return -WD_EINVAL; }
- sched_info[numa_id].ctx_region[mode][type].begin = param->begin; - sched_info[numa_id].ctx_region[mode][type].end = param->end; - sched_info[numa_id].ctx_region[mode][type].last = param->begin; - sched_info[numa_id].ctx_region[mode][type].valid = true; - sched_info[numa_id].valid = true; + ret = wd_sched_region_instance(&sched_info[numa_id], param); + if (ret) { + WD_ERR("failed to instance ctx_region!\n"); + return ret; + }
wd_sched_map_cpus_to_dev(sched_ctx);
- pthread_mutex_init(&sched_info[numa_id].ctx_region[mode][type].lock, - NULL); - return 0; }
+static void wd_sched_region_release(struct wd_sched_ctx *sched_ctx) +{ + struct wd_sched_info *sched_info, *next_info, *cur_info; + __u32 i, j; + + sched_info = sched_ctx->sched_info; + if (!sched_info) + return; + + for (i = 0; i < sched_ctx->numa_num; i++) { + cur_info = &sched_info[i]; + while (cur_info) { + next_info = cur_info->next_info; + for (j = 0; j < SCHED_MODE_BUTT; j++) { + if (cur_info->ctx_region[j]) { + free(cur_info->ctx_region[j]); + cur_info->ctx_region[j] = NULL; + } + } + /* First info region is alloced by sched ctx */ + if (cur_info->region_type != UADK_CTX_HW) + free(cur_info); + cur_info = next_info; + } + } +} + + void wd_sched_rr_release(struct wd_sched *sched) { - struct wd_sched_info *sched_info; struct wd_sched_ctx *sched_ctx; - int i, j; + __u32 hw_dfx_num = 0; + __u32 sw_dfx_num = 0; + __u32 i;
if (!sched) return; @@ -574,24 +1399,25 @@ void wd_sched_rr_release(struct wd_sched *sched) if (!sched_ctx) goto ctx_out;
- sched_info = sched_ctx->sched_info; - if (!sched_info) - goto info_out; - - for (i = 0; i < sched_ctx->numa_num; i++) { - for (j = 0; j < SCHED_MODE_BUTT; j++) { - if (sched_info[i].ctx_region[j]) { - free(sched_info[i].ctx_region[j]); - sched_info[i].ctx_region[j] = NULL; - } + for (i = 0; i < sched_ctx->skey_num; i++) { + if (sched_ctx->skey[i] != NULL) { + hw_dfx_num += sched_ctx->skey[i]->balancer.hw_dfx_num; + sw_dfx_num += sched_ctx->skey[i]->balancer.sw_dfx_num; } + sched_ctx->skey[i] = NULL; } - -info_out: + hw_dfx_num += sched_ctx->balancer.hw_dfx_num; + sw_dfx_num += sched_ctx->balancer.sw_dfx_num; + sched_ctx->skey_num = 0; + /* Release sched dfx info */ + WD_ERR("scheduler balance hw task num: %u, sw task num: %u\n", + hw_dfx_num, sw_dfx_num); + + wd_sched_region_release(sched_ctx); free(sched_ctx); + ctx_out: free(sched); - return; }
@@ -613,13 +1439,60 @@ static int numa_num_check(__u16 numa_num) return 0; }
+static int wd_sched_region_init(struct wd_sched_ctx *sched_ctx, + __u8 type_num, __u16 numa_num) +{ + struct wd_sched_info *sched_info = sched_ctx->sched_info; + struct wd_sched_info *cur_info; + int i, j, k; + + for (i = 0; i < MAX_SKEY_REGION_NUM; i++) { + sched_ctx->skey[i] = NULL; + sched_ctx->poll_tid[i] = 0; + } + pthread_mutex_init(&sched_ctx->skey_lock, NULL); + sched_ctx->skey_num = 0; + memset(&sched_ctx->balancer, 0x0, sizeof(struct wd_sched_balancer)); + + for (i = 0; i < numa_num; i++) { + /* Init sched_info next list */ + cur_info = &sched_info[i]; + for (j = 0; j < UADK_CTX_MAX; j++) { + for (k = 0; k < SCHED_MODE_BUTT; k++) { + cur_info->ctx_region[k] = + calloc(1, sizeof(struct sched_ctx_region) * type_num); + if (!cur_info->ctx_region[k]) + goto sched_err; + } + cur_info->valid = false; + cur_info->region_type = j; + + /* The last node point to NULL */ + if (j == UADK_CTX_MAX - 1) { + cur_info->next_info = NULL; + break; + } + cur_info->next_info = calloc(1, sizeof(*cur_info)); + if (!cur_info) + goto sched_err; + cur_info = cur_info->next_info; + } + } + + return 0; + +sched_err: + wd_sched_region_release(sched_ctx); + + return -WD_EINVAL; +} + struct wd_sched *wd_sched_rr_alloc(__u8 sched_type, __u8 type_num, __u16 numa_num, user_poll_func func) { - struct wd_sched_info *sched_info; struct wd_sched_ctx *sched_ctx; struct wd_sched *sched; - int i, j; + int ret;
if (numa_num_check(numa_num)) return NULL; @@ -636,6 +1509,7 @@ struct wd_sched *wd_sched_rr_alloc(__u8 sched_type, __u8 type_num, return NULL; }
+ WD_ERR("wd_sched numa number value: %u!\n", numa_num); sched_ctx = calloc(1, sizeof(struct wd_sched_ctx) + sizeof(struct wd_sched_info) * numa_num); if (!sched_ctx) { @@ -649,15 +1523,9 @@ struct wd_sched *wd_sched_rr_alloc(__u8 sched_type, __u8 type_num, sched_type == SCHED_POLICY_SINGLE) goto simple_ok;
- sched_info = sched_ctx->sched_info; - for (i = 0; i < numa_num; i++) { - for (j = 0; j < SCHED_MODE_BUTT; j++) { - sched_info[i].ctx_region[j] = - calloc(1, sizeof(struct sched_ctx_region) * type_num); - if (!sched_info[i].ctx_region[j]) - goto err_out; - } - } + ret = wd_sched_region_init(sched_ctx, type_num, numa_num); + if (ret) + goto ctx_out;
simple_ok: sched_ctx->poll_func = func; @@ -673,7 +1541,9 @@ simple_ok:
return sched;
+ctx_out: + free(sched_ctx); err_out: - wd_sched_rr_release(sched); + free(sched); return NULL; }
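As a usage sketch (an assumed call site, not part of this patch), the new policies are selected through the existing wd_sched_rr_alloc()/wd_sched_rr_instance() entry points; only the sched_type value and the ctx_prop field of struct sched_params change. The region bounds, type index, numa count and the poll callback below are illustrative placeholders:

#include <numa.h>
#include "wd_sched.h"

/*
 * Sketch: allocate the new loop scheduler and register one sync HW
 * ctx region. The begin/end bounds and type index are placeholders.
 */
static struct wd_sched *setup_loop_sched_sketch(user_poll_func poll_cb)
{
	struct sched_params param = {
		.numa_id = 0,
		.type = 0,
		.mode = CTX_MODE_SYNC,
		.begin = 0,
		.end = 3,
		.ctx_prop = UADK_CTX_HW,
	};
	struct wd_sched *sched;

	sched = wd_sched_rr_alloc(SCHED_POLICY_LOOP, 1, numa_max_node() + 1, poll_cb);
	if (!sched)
		return NULL;

	if (wd_sched_rr_instance(sched, &param)) {
		wd_sched_rr_release(sched);
		return NULL;
	}

	return sched;
}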
After adapting to the new heterogeneous hybrid acceleration function, the initialization of the device drivers requires matching updates. In addition, the instruction-based acceleration drivers need to fully support both the synchronous and asynchronous modes of the uadk framework.
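The soft-queue helpers added to wd_util.c (wd_queue_is_busy(), wd_get_sqe_from_queue() and wd_put_sqe_to_queue()) give an instruction driver an asynchronous completion path without a hardware queue. A minimal sketch of the expected send/recv pairing, mirroring the isa_ce changes below; the digest message type is taken from the patch, while do_ce_compute() is a hypothetical stand-in for the actual instruction computation:

static int ce_drv_send_sketch(handle_t ctx, void *wd_msg)
{
	struct wd_soft_ctx *sfctx = (struct wd_soft_ctx *)ctx;
	struct wd_digest_msg *msg = (struct wd_digest_msg *)wd_msg;
	int ret;

	/* Back-pressure: reject new requests while the soft FIFO is full */
	ret = wd_queue_is_busy(sfctx);
	if (ret)
		return ret;

	/* Hypothetical stand-in for the CE instruction computation */
	ret = do_ce_compute(msg);
	if (ret)
		return ret;

	/* Park the completed tag so the async recv path can report it */
	return wd_get_sqe_from_queue(sfctx, msg->tag);
}

static int ce_drv_recv_sketch(handle_t ctx, void *wd_msg)
{
	struct wd_soft_ctx *sfctx = (struct wd_soft_ctx *)ctx;
	struct wd_digest_msg *msg = (struct wd_digest_msg *)wd_msg;

	/* Pops one finished tag; returns -WD_EAGAIN when nothing is queued */
	return wd_put_sqe_to_queue(sfctx, &msg->tag, &msg->result);
}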
Signed-off-by: Longfang Liu liulongfang@huawei.com --- Makefile.am | 2 +- drv/hisi_comp.c | 3 +++ drv/hisi_dae.c | 3 +++ drv/hisi_hpre.c | 3 +++ drv/hisi_sec.c | 3 +++ drv/isa_ce_sm3.c | 18 ++++++++++++- drv/isa_ce_sm4.c | 22 ++++++++++++++-- include/wd_util.h | 4 +++ wd_util.c | 65 +++++++++++++++++++++++++++++++++++++++++++++++ 9 files changed, 119 insertions(+), 4 deletions(-)
diff --git a/Makefile.am b/Makefile.am index d671d09..cf50fec 100644 --- a/Makefile.am +++ b/Makefile.am @@ -93,7 +93,7 @@ libhisi_hpre_la_SOURCES=drv/hisi_hpre.c drv/hisi_qm_udrv.c \ hisi_qm_udrv.h
libisa_ce_la_SOURCES=arm_arch_ce.h drv/isa_ce_sm3.c drv/isa_ce_sm3_armv8.S isa_ce_sm3.h \ - drv/isa_ce_sm4.c drv/isa_ce_sm4_armv8.S drv/isa_ce_sm4.h + drv/isa_ce_sm4.c drv/isa_ce_sm4_armv8.S drv/isa_ce_sm4.h wd_util.c wd_util.h
libisa_sve_la_SOURCES=drv/hash_mb/hash_mb.c wd_digest_drv.h drv/hash_mb/hash_mb.h \ drv/hash_mb/sm3_sve_common.S drv/hash_mb/sm3_mb_asimd_x1.S \ diff --git a/drv/hisi_comp.c b/drv/hisi_comp.c index 547e665..6099efb 100644 --- a/drv/hisi_comp.c +++ b/drv/hisi_comp.c @@ -807,6 +807,9 @@ static int hisi_zip_init(void *conf, void *priv) memcpy(&zip_ctx->config, config, sizeof(struct wd_ctx_config_internal)); /* allocate qp for each context */ for (i = 0; i < config->ctx_num; i++) { + if (config->ctxs[i].ctx_type != UADK_CTX_HW || + !config->ctxs[i].ctx) + continue; h_ctx = config->ctxs[i].ctx; qm_priv.sqe_size = sizeof(struct hisi_zip_sqe); qm_priv.op_type = config->ctxs[i].op_type; diff --git a/drv/hisi_dae.c b/drv/hisi_dae.c index 49d6b55..e8a75a4 100644 --- a/drv/hisi_dae.c +++ b/drv/hisi_dae.c @@ -1573,6 +1573,9 @@ static int dae_init(void *conf, void *priv) qm_priv.sqe_size = sizeof(struct dae_sqe); /* Allocate qp for each context */ for (i = 0; i < config->ctx_num; i++) { + if (config->ctxs[i].ctx_type != UADK_CTX_HW || + !config->ctxs[i].ctx) + continue; h_ctx = config->ctxs[i].ctx; qm_priv.qp_mode = config->ctxs[i].ctx_mode; /* Setting the epoll en to 0 for ASYNC ctx */ diff --git a/drv/hisi_hpre.c b/drv/hisi_hpre.c index f91d7a8..8ade9ed 100644 --- a/drv/hisi_hpre.c +++ b/drv/hisi_hpre.c @@ -501,6 +501,9 @@ static int hpre_init_qm_priv(struct wd_ctx_config_internal *config, qm_priv->sqe_size = sizeof(struct hisi_hpre_sqe);
for (i = 0; i < config->ctx_num; i++) { + if (config->ctxs[i].ctx_type != UADK_CTX_HW || + !config->ctxs[i].ctx) + continue; h_ctx = config->ctxs[i].ctx; qm_priv->qp_mode = config->ctxs[i].ctx_mode; /* Setting the epoll en to 0 for ASYNC ctx */ diff --git a/drv/hisi_sec.c b/drv/hisi_sec.c index 7635466..7f46e4b 100644 --- a/drv/hisi_sec.c +++ b/drv/hisi_sec.c @@ -3095,6 +3095,9 @@ static int hisi_sec_init(void *conf, void *priv) qm_priv.sqe_size = sizeof(struct hisi_sec_sqe); /* allocate qp for each context */ for (i = 0; i < config->ctx_num; i++) { + if (config->ctxs[i].ctx_type != UADK_CTX_HW || + !config->ctxs[i].ctx) + continue; h_ctx = config->ctxs[i].ctx; /* setting the type is 0 for sqc_type */ qm_priv.op_type = 0; diff --git a/drv/isa_ce_sm3.c b/drv/isa_ce_sm3.c index c8812df..110eb4d 100644 --- a/drv/isa_ce_sm3.c +++ b/drv/isa_ce_sm3.c @@ -17,7 +17,6 @@ #include "drv/isa_ce_sm3.h" #include "drv/wd_digest_drv.h" #include "wd_digest.h" -#include "wd_util.h"
typedef void (sm3_ce_block_fn)(__u32 word_reg[SM3_STATE_WORDS], const unsigned char *src, size_t blocks); @@ -340,6 +339,7 @@ static int do_hmac_sm3_ce(struct wd_digest_msg *msg, __u8 *out_hmac)
static int sm3_ce_drv_send(handle_t ctx, void *digest_msg) { + struct wd_soft_ctx *sfctx = (struct wd_soft_ctx *)ctx; struct wd_digest_msg *msg = (struct wd_digest_msg *)digest_msg; __u8 digest[SM3_DIGEST_SIZE] = {0}; int ret; @@ -349,6 +349,10 @@ static int sm3_ce_drv_send(handle_t ctx, void *digest_msg) return -WD_EINVAL; }
+ ret = wd_queue_is_busy(sfctx); + if (ret) + return ret; + if (msg->data_fmt == WD_SGL_BUF) { WD_ERR("invalid: SM3 CE driver do not support sgl data format!\n"); return -WD_EINVAL; @@ -363,11 +367,23 @@ static int sm3_ce_drv_send(handle_t ctx, void *digest_msg) ret = -WD_EINVAL; }
+ ret = wd_get_sqe_from_queue(sfctx, msg->tag); + if (ret) + return ret; + return ret; }
static int sm3_ce_drv_recv(handle_t ctx, void *digest_msg) { + struct wd_soft_ctx *sfctx = (struct wd_soft_ctx *)ctx; + struct wd_digest_msg *msg = (struct wd_digest_msg *)digest_msg; + int ret; + + ret = wd_put_sqe_to_queue(sfctx, &msg->tag, &msg->result); + if (ret) + return ret; + return WD_SUCCESS; }
diff --git a/drv/isa_ce_sm4.c b/drv/isa_ce_sm4.c index 4b2f9cf..d3cd217 100644 --- a/drv/isa_ce_sm4.c +++ b/drv/isa_ce_sm4.c @@ -11,9 +11,10 @@ * Copyright 2024 Huawei Technologies Co.,Ltd. All rights reserved. */
+#include "wd_alg.h" #include "drv/wd_cipher_drv.h" -#include "wd_cipher.h" #include "isa_ce_sm4.h" +#include "wd_cipher.h"
#define SM4_ENCRYPT 1 #define SM4_DECRYPT 0 @@ -323,15 +324,20 @@ static int sm4_xts_decrypt(struct wd_cipher_msg *msg, const struct SM4_KEY *rkey
static int isa_ce_cipher_send(handle_t ctx, void *wd_msg) { + struct wd_soft_ctx *sfctx = (struct wd_soft_ctx *)ctx; struct wd_cipher_msg *msg = wd_msg; struct SM4_KEY rkey; int ret = 0;
- if (!msg) { + if (!msg || !ctx) { WD_ERR("invalid: input sm4 msg is NULL!\n"); return -WD_EINVAL; }
+ ret = wd_queue_is_busy(sfctx); + if (ret) + return ret; + if (msg->data_fmt == WD_SGL_BUF) { WD_ERR("invalid: SM4 CE driver do not support sgl data format!\n"); return -WD_EINVAL; @@ -384,11 +390,23 @@ static int isa_ce_cipher_send(handle_t ctx, void *wd_msg) return -WD_EINVAL; }
+ ret = wd_get_sqe_from_queue(sfctx, msg->tag); + if (ret) + return ret; + return ret; }
static int isa_ce_cipher_recv(handle_t ctx, void *wd_msg) { + struct wd_soft_ctx *sfctx = (struct wd_soft_ctx *)ctx; + struct wd_cipher_msg *msg = wd_msg; + int ret; + + ret = wd_put_sqe_to_queue(sfctx, &msg->tag, &msg->result); + if (ret) + return ret; + return 0; }
diff --git a/include/wd_util.h b/include/wd_util.h index bee7f29..ff9aa02 100644 --- a/include/wd_util.h +++ b/include/wd_util.h @@ -561,6 +561,10 @@ static inline void wd_ctx_spin_unlock(struct wd_ctx_internal *ctx, int type) pthread_spin_unlock(&ctx->lock); }
+int wd_queue_is_busy(struct wd_soft_ctx *sctx); +int wd_get_sqe_from_queue(struct wd_soft_ctx *sctx, __u32 tag_id); +int wd_put_sqe_to_queue(struct wd_soft_ctx *sctx, __u32 *tag_id, __u8 *result); + #ifdef __cplusplus } #endif diff --git a/wd_util.c b/wd_util.c index 848ff15..bf6894b 100644 --- a/wd_util.c +++ b/wd_util.c @@ -3077,3 +3077,68 @@ void wd_alg_attrs_uninit(struct wd_init_attrs *attrs) free(ctx_config); wd_sched_rr_release(alg_sched); } + +int wd_queue_is_busy(struct wd_soft_ctx *sctx) +{ + /* The queue is not used */ + if (sctx->run_num >= MAX_SOFT_QUEUE_LENGTH - 1) + return -WD_EBUSY; + + return 0; +} + +int wd_get_sqe_from_queue(struct wd_soft_ctx *sctx, __u32 tag_id) +{ + struct wd_soft_sqe *sqe = NULL; + + pthread_spin_lock(&sctx->slock); + sqe = &sctx->qfifo[sctx->head]; + if (!sqe->used && !sqe->complete) { // find the next not used sqe + sctx->head++; + if (unlikely(sctx->head == MAX_SOFT_QUEUE_LENGTH)) + sctx->head = 0; + + sqe->used = 1; + sqe->complete = 1; + sqe->id = tag_id; + sqe->result = 0; + __atomic_fetch_add(&sctx->run_num, 0x1, __ATOMIC_ACQUIRE); + pthread_spin_unlock(&sctx->slock); + } else { + pthread_spin_unlock(&sctx->slock); + return -WD_EBUSY; + } + + return 0; +} + +int wd_put_sqe_to_queue(struct wd_soft_ctx *sctx, __u32 *tag_id, __u8 *result) +{ + struct wd_soft_sqe *sqe = NULL; + + /* The queue is not used */ + if (sctx->run_num < 1) + return -WD_EAGAIN; + + if (pthread_spin_trylock(&sctx->rlock)) + return -WD_EAGAIN; + sqe = &sctx->qfifo[sctx->tail]; + if (sqe->used && sqe->complete) { // find a used sqe + sctx->tail++; + if (unlikely(sctx->tail == MAX_SOFT_QUEUE_LENGTH)) + sctx->tail = 0; + + *tag_id = sqe->id; + *result = sqe->result; + sqe->used = 0x0; + sqe->complete = 0x0; + __atomic_fetch_sub(&sctx->run_num, 0x1, __ATOMIC_ACQUIRE); + pthread_spin_unlock(&sctx->rlock); + } else { + pthread_spin_unlock(&sctx->rlock); + return -WD_EAGAIN; + } + + return 0; +} +
After the uadk framework's heterogeneous scheduling update, the internal implementation of the aead algorithm needs to be adapted to match.
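Since the global wd_aead_setting.driver is removed, each internal ctx now carries its own driver, so the send/recv callbacks are taken from ctx->drv. A minimal sketch of the resulting sync dispatch pattern (the message pointer and epoll_en parameter are passed in only to keep the sketch self-contained):

static int send_recv_sync_sketch(struct wd_ctx_internal *ctx, void *msg, int epoll_en)
{
	struct wd_msg_handle msg_handle;
	int ret;

	/* Per-ctx driver instead of a single global driver */
	msg_handle.send = ctx->drv->send;
	msg_handle.recv = ctx->drv->recv;

	pthread_spin_lock(&ctx->lock);
	ret = wd_handle_msg_sync(&msg_handle, ctx->ctx, msg, NULL, epoll_en);
	pthread_spin_unlock(&ctx->lock);

	return ret;
}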
Signed-off-by: Longfang Liu liulongfang@huawei.com --- wd_aead.c | 105 +++++++++++++++++++++++++----------------------------- 1 file changed, 48 insertions(+), 57 deletions(-)
diff --git a/wd_aead.c b/wd_aead.c index 1a1e381..3448144 100644 --- a/wd_aead.c +++ b/wd_aead.c @@ -32,7 +32,6 @@ struct wd_aead_setting { enum wd_status status; struct wd_ctx_config_internal config; struct wd_sched sched; - struct wd_alg_driver *driver; struct wd_async_msg_pool pool; void *priv; void *dlhandle; @@ -72,20 +71,16 @@ static void wd_aead_close_driver(int init_type) }
if (wd_aead_setting.dlhandle) { - wd_release_drv(wd_aead_setting.driver); dlclose(wd_aead_setting.dlhandle); wd_aead_setting.dlhandle = NULL; } #else - wd_release_drv(wd_aead_setting.driver); hisi_sec2_remove(); #endif }
static int wd_aead_open_driver(int init_type) { - struct wd_alg_driver *driver = NULL; - const char *alg_name = "gcm(aes)"; #ifndef WD_STATIC_DRV char lib_path[PATH_MAX]; int ret; @@ -119,14 +114,6 @@ static int wd_aead_open_driver(int init_type) if (init_type == WD_TYPE_V2) return WD_SUCCESS; #endif - driver = wd_request_drv(alg_name, false); - if (!driver) { - wd_aead_close_driver(WD_TYPE_V1); - WD_ERR("failed to get %s driver support\n", alg_name); - return -WD_EINVAL; - } - - wd_aead_setting.driver = driver;
return WD_SUCCESS; } @@ -333,7 +320,7 @@ handle_t wd_aead_alloc_sess(struct wd_aead_sess_setup *setup) sess->cmode = setup->cmode; sess->dalg = setup->dalg; sess->dmode = setup->dmode; - ret = wd_drv_alg_support(sess->alg_name, wd_aead_setting.driver); + ret = wd_drv_alg_support(sess->alg_name, &wd_aead_setting.config); if (!ret) { WD_ERR("failed to support this algorithm: %s!\n", sess->alg_name); goto err_sess; @@ -465,16 +452,9 @@ static int wd_aead_init_nolock(struct wd_ctx_config *config, struct wd_sched *sc if (ret < 0) goto out_clear_sched;
- ret = wd_alg_init_driver(&wd_aead_setting.config, - wd_aead_setting.driver, - &wd_aead_setting.priv); - if (ret) - goto out_clear_pool; - + wd_aead_setting.priv = STATUS_ENABLE; return 0;
-out_clear_pool: - wd_uninit_async_request_pool(&wd_aead_setting.pool); out_clear_sched: wd_clear_sched(&wd_aead_setting.sched); out_clear_ctx_config: @@ -483,6 +463,13 @@ out_clear_ctx_config: return ret; }
+static void wd_aead_uninit_nolock(void) +{ + wd_uninit_async_request_pool(&wd_aead_setting.pool); + wd_clear_sched(&wd_aead_setting.sched); + wd_aead_setting.priv = NULL; +} + int wd_aead_init(struct wd_ctx_config *config, struct wd_sched *sched) { int ret; @@ -505,10 +492,21 @@ int wd_aead_init(struct wd_ctx_config *config, struct wd_sched *sched) if (ret) goto out_close_driver;
+ ret = wd_ctx_drv_config("gcm(aes)", &wd_aead_setting.config); + if (ret) + goto out_uninit_nolock; + + ret = wd_alg_init_driver_nw(&wd_aead_setting.config); + if (ret) + goto out_drv_deconfig; wd_alg_set_init(&wd_aead_setting.status);
return 0;
+out_drv_deconfig: + wd_ctx_drv_deconfig(&wd_aead_setting.config); +out_uninit_nolock: + wd_aead_uninit_nolock(); out_close_driver: wd_aead_close_driver(WD_TYPE_V1); out_clear_init: @@ -516,20 +514,14 @@ out_clear_init: return ret; }
-static void wd_aead_uninit_nolock(void) -{ - wd_uninit_async_request_pool(&wd_aead_setting.pool); - wd_clear_sched(&wd_aead_setting.sched); - wd_alg_uninit_driver(&wd_aead_setting.config, - wd_aead_setting.driver, - &wd_aead_setting.priv); -} - void wd_aead_uninit(void) { if (!wd_aead_setting.priv) return;
+ wd_alg_uninit_driver_nw(&wd_aead_setting.config); + wd_ctx_drv_deconfig(&wd_aead_setting.config); + wd_aead_uninit_nolock(); wd_aead_close_driver(WD_TYPE_V1); wd_alg_clear_init(&wd_aead_setting.status); @@ -579,38 +571,26 @@ int wd_aead_init2_(char *alg, __u32 sched_type, int task_type,
while (ret != 0) { memset(&wd_aead_setting.config, 0, sizeof(struct wd_ctx_config_internal)); - - /* Get alg driver and dev name */ - wd_aead_setting.driver = wd_alg_drv_bind(task_type, alg); - if (!wd_aead_setting.driver) { - WD_ERR("failed to bind %s driver.\n", alg); - goto out_dlopen; - } - + /* Init ctx param and prepare for ctx request */ aead_ctx_params.ctx_set_num = aead_ctx_num; - ret = wd_ctx_param_init(&aead_ctx_params, ctx_params, - wd_aead_setting.driver, WD_AEAD_TYPE, - WD_DIGEST_CIPHER_DECRYPTION + 1); + ret = wd_ctx_param_init_nw(&aead_ctx_params, ctx_params, + alg, task_type, WD_AEAD_TYPE, WD_DIGEST_CIPHER_DECRYPTION + 1); if (ret) { if (ret == -WD_EAGAIN) { - wd_disable_drv(wd_aead_setting.driver); - wd_alg_drv_unbind(wd_aead_setting.driver); continue; } - goto out_driver; + goto out_dlclose; }
wd_aead_init_attrs.alg = alg; wd_aead_init_attrs.sched_type = sched_type; - wd_aead_init_attrs.driver = wd_aead_setting.driver; + wd_aead_init_attrs.task_type = task_type; wd_aead_init_attrs.ctx_params = &aead_ctx_params; wd_aead_init_attrs.alg_init = wd_aead_init_nolock; wd_aead_init_attrs.alg_poll_ctx = wd_aead_poll_ctx; ret = wd_alg_attrs_init(&wd_aead_init_attrs); if (ret) { if (ret == -WD_ENODEV) { - wd_disable_drv(wd_aead_setting.driver); - wd_alg_drv_unbind(wd_aead_setting.driver); wd_ctx_param_uninit(&aead_ctx_params); continue; } @@ -618,16 +598,27 @@ int wd_aead_init2_(char *alg, __u32 sched_type, int task_type, goto out_params_uninit; } } + ret = wd_ctx_drv_config(alg, &wd_aead_setting.config); + if (ret) + goto out_uninit_nolock; + + ret = wd_alg_init_driver_nw(&wd_aead_setting.config); + if (ret) + goto out_drv_deconfig; + wd_alg_set_init(&wd_aead_setting.status); wd_ctx_param_uninit(&aead_ctx_params);
return 0;
+out_drv_deconfig: + wd_ctx_drv_deconfig(&wd_aead_setting.config); +out_uninit_nolock: + wd_aead_uninit_nolock(); + wd_alg_attrs_uninit(&wd_aead_init_attrs); out_params_uninit: wd_ctx_param_uninit(&aead_ctx_params); -out_driver: - wd_alg_drv_unbind(wd_aead_setting.driver); -out_dlopen: +out_dlclose: wd_aead_close_driver(WD_TYPE_V2); out_uninit: wd_alg_clear_init(&wd_aead_setting.status); @@ -639,9 +630,9 @@ void wd_aead_uninit2(void) if (!wd_aead_setting.priv) return;
+ wd_ctx_drv_deconfig(&wd_aead_setting.config); wd_aead_uninit_nolock(); wd_alg_attrs_uninit(&wd_aead_init_attrs); - wd_alg_drv_unbind(wd_aead_setting.driver); wd_aead_close_driver(WD_TYPE_V2); wd_aead_setting.dlh_list = NULL; wd_alg_clear_init(&wd_aead_setting.status); @@ -722,8 +713,8 @@ static int send_recv_sync(struct wd_ctx_internal *ctx, struct wd_msg_handle msg_handle; int ret;
- msg_handle.send = wd_aead_setting.driver->send; - msg_handle.recv = wd_aead_setting.driver->recv; + msg_handle.send = ctx->drv->send; + msg_handle.recv = ctx->drv->recv;
pthread_spin_lock(&ctx->lock); ret = wd_handle_msg_sync(&msg_handle, ctx->ctx, msg, NULL, @@ -796,13 +787,13 @@ int wd_do_aead_async(handle_t h_sess, struct wd_aead_req *req) idx, (void **)&msg); if (unlikely(msg_id < 0)) { WD_ERR("failed to get msg from pool!\n"); - return msg_id; + return -WD_EBUSY; }
fill_request_msg(msg, req, sess); msg->tag = msg_id;
- ret = wd_aead_setting.driver->send(ctx->ctx, msg); + ret = ctx->drv->send(ctx->ctx, msg); if (unlikely(ret < 0)) { if (ret != -WD_EBUSY) WD_ERR("failed to send BD, hw is err!\n"); @@ -851,7 +842,7 @@ int wd_aead_poll_ctx(__u32 idx, __u32 expt, __u32 *count) ctx = config->ctxs + idx;
do { - ret = wd_aead_setting.driver->recv(ctx->ctx, &resp_msg); + ret = ctx->drv->recv(ctx->ctx, &resp_msg); if (ret == -WD_EAGAIN) { return ret; } else if (ret < 0) {
After the uadk framework's heterogeneous scheduling update, the internal implementation of the hash-agg algorithm needs to be adapted to match.
Signed-off-by: Longfang Liu liulongfang@huawei.com --- wd_agg.c | 80 ++++++++++++++++++++++++++++++++------------------------ 1 file changed, 46 insertions(+), 34 deletions(-)
diff --git a/wd_agg.c b/wd_agg.c index 7a4b17c..5cf21d3 100644 --- a/wd_agg.c +++ b/wd_agg.c @@ -32,7 +32,6 @@ struct wd_agg_setting { struct wd_ctx_config_internal config; struct wd_sched sched; struct wd_async_msg_pool pool; - struct wd_alg_driver *driver; void *priv; void *dlhandle; void *dlh_list; @@ -382,6 +381,26 @@ static int wd_agg_init_sess_priv(struct wd_agg_sess *sess, struct wd_agg_sess_se return WD_SUCCESS; }
+static int wd_agg_drv_ops(struct wd_agg_sess *sess, void *param) +{ + struct wd_ctx_config_internal *config = param; + struct wd_alg_driver *drv; + __u32 i; + int ret; + + for (i = 0; i < config->ctx_num; i++) { + drv = config->ctxs[i].drv; + if (!drv) + continue; + + ret = drv->get_extend_ops(&sess->ops); + if (!ret) + return 0; + } + + return -WD_EINVAL; +} + handle_t wd_agg_alloc_sess(struct wd_agg_sess_setup *setup) { __u32 out_agg_cols_num = 0; @@ -401,7 +420,7 @@ handle_t wd_agg_alloc_sess(struct wd_agg_sess_setup *setup) sess->agg_conf.out_cols_num = out_agg_cols_num;
sess->alg_name = wd_agg_alg_name; - ret = wd_drv_alg_support(sess->alg_name, wd_agg_setting.driver); + ret = wd_drv_alg_support(sess->alg_name, &wd_agg_setting.config); if (!ret) { WD_ERR("failed to support agg algorithm: %s!\n", sess->alg_name); goto err_sess; @@ -415,7 +434,7 @@ handle_t wd_agg_alloc_sess(struct wd_agg_sess_setup *setup) goto err_sess; }
- ret = wd_agg_setting.driver->get_extend_ops(&sess->ops); + ret = wd_agg_drv_ops(sess, &wd_agg_setting.config); if (ret) { WD_ERR("failed to get agg extend ops!\n"); goto err_sess; @@ -592,15 +611,10 @@ static int wd_agg_alg_init(struct wd_ctx_config *config, struct wd_sched *sched) if (ret < 0) goto out_clear_sched;
- ret = wd_alg_init_driver(&wd_agg_setting.config, wd_agg_setting.driver, - &wd_agg_setting.priv); - if (ret) - goto out_clear_pool; + wd_agg_setting.priv = STATUS_ENABLE;
return WD_SUCCESS;
-out_clear_pool: - wd_uninit_async_request_pool(&wd_agg_setting.pool); out_clear_sched: wd_clear_sched(&wd_agg_setting.sched); out_clear_ctx_config: @@ -617,12 +631,9 @@ static int wd_agg_alg_uninit(void)
/* Uninit async request pool */ wd_uninit_async_request_pool(&wd_agg_setting.pool); - /* Unset config, sched, driver */ wd_clear_sched(&wd_agg_setting.sched); - - wd_alg_uninit_driver(&wd_agg_setting.config, wd_agg_setting.driver, - &wd_agg_setting.priv); + wd_agg_setting.priv = NULL;
return WD_SUCCESS; } @@ -660,20 +671,12 @@ int wd_agg_init(char *alg, __u32 sched_type, int task_type, struct wd_ctx_params while (ret != 0) { memset(&wd_agg_setting.config, 0, sizeof(struct wd_ctx_config_internal));
- /* Get alg driver and dev name */ - wd_agg_setting.driver = wd_alg_drv_bind(task_type, alg); - if (!wd_agg_setting.driver) { - WD_ERR("failed to bind %s driver.\n", alg); - goto out_dlopen; - } - + /* Init ctx param and prepare for ctx request */ agg_ctx_params.ctx_set_num = &agg_ctx_num; - ret = wd_ctx_param_init(&agg_ctx_params, ctx_params, wd_agg_setting.driver, + ret = wd_ctx_param_init_nw(&agg_ctx_params, ctx_params, alg, task_type, WD_AGG_TYPE, 1); if (ret) { if (ret == -WD_EAGAIN) { - wd_disable_drv(wd_agg_setting.driver); - wd_alg_drv_unbind(wd_agg_setting.driver); continue; } goto out_driver; @@ -681,15 +684,13 @@ int wd_agg_init(char *alg, __u32 sched_type, int task_type, struct wd_ctx_params
wd_agg_init_attrs.alg = alg; wd_agg_init_attrs.sched_type = sched_type; - wd_agg_init_attrs.driver = wd_agg_setting.driver; + wd_agg_init_attrs.task_type = task_type; wd_agg_init_attrs.ctx_params = &agg_ctx_params; wd_agg_init_attrs.alg_init = wd_agg_alg_init; wd_agg_init_attrs.alg_poll_ctx = wd_agg_poll_ctx; ret = wd_alg_attrs_init(&wd_agg_init_attrs); if (ret) { if (ret == -WD_ENODEV) { - wd_disable_drv(wd_agg_setting.driver); - wd_alg_drv_unbind(wd_agg_setting.driver); wd_ctx_param_uninit(&agg_ctx_params); continue; } @@ -697,17 +698,27 @@ int wd_agg_init(char *alg, __u32 sched_type, int task_type, struct wd_ctx_params goto out_params_uninit; } } + ret = wd_ctx_drv_config(alg, &wd_agg_setting.config); + if (ret) + goto out_uninit_nolock; + + ret = wd_alg_init_driver_nw(&wd_agg_setting.config); + if (ret) + goto out_drv_deconfig;
wd_alg_set_init(&wd_agg_setting.status); wd_ctx_param_uninit(&agg_ctx_params);
return WD_SUCCESS;
+out_drv_deconfig: + wd_ctx_drv_deconfig(&wd_agg_setting.config); +out_uninit_nolock: + wd_agg_alg_uninit(); + wd_alg_attrs_uninit(&wd_agg_init_attrs); out_params_uninit: wd_ctx_param_uninit(&agg_ctx_params); out_driver: - wd_alg_drv_unbind(wd_agg_setting.driver); -out_dlopen: wd_agg_close_driver(); out_uninit: wd_alg_clear_init(&wd_agg_setting.status); @@ -722,8 +733,9 @@ void wd_agg_uninit(void) if (ret) return;
- wd_alg_attrs_uninit(&wd_agg_init_attrs); - wd_alg_drv_unbind(wd_agg_setting.driver); + wd_alg_uninit_driver_nw(&wd_agg_setting.config); + wd_ctx_drv_deconfig(&wd_agg_setting.config); + wd_agg_close_driver(); wd_agg_setting.dlh_list = NULL; wd_alg_clear_init(&wd_agg_setting.status); @@ -1096,8 +1108,8 @@ static int wd_agg_sync_job(struct wd_agg_sess *sess, struct wd_agg_req *req, wd_dfx_msg_cnt(config, WD_CTX_CNT_NUM, idx); ctx = config->ctxs + idx;
- msg_handle.send = wd_agg_setting.driver->send; - msg_handle.recv = wd_agg_setting.driver->recv; + msg_handle.send = ctx->drv->send; + msg_handle.recv = ctx->drv->recv;
pthread_spin_lock(&ctx->lock); ret = wd_handle_msg_sync(&msg_handle, ctx->ctx, msg, NULL, config->epoll_en); @@ -1201,7 +1213,7 @@ static int wd_agg_async_job(struct wd_agg_sess *sess, struct wd_agg_req *req, bo else fill_request_msg_output(msg, req, sess, false); msg->tag = msg_id; - ret = wd_agg_setting.driver->send(ctx->ctx, msg); + ret = ctx->drv->send(ctx->ctx, msg); if (unlikely(ret < 0)) { if (ret != -WD_EBUSY) WD_ERR("wd agg async send err!\n"); @@ -1528,7 +1540,7 @@ static int wd_agg_poll_ctx(__u32 idx, __u32 expt, __u32 *count) ctx = config->ctxs + idx;
do { - ret = wd_agg_setting.driver->recv(ctx->ctx, &resp_msg); + ret = ctx->drv->recv(ctx->ctx, &resp_msg); if (ret == -WD_EAGAIN) { return ret; } else if (unlikely(ret < 0)) {
After the uadk framework's heterogeneous scheduling update, the internal implementation of the cipher algorithm needs to be adapted to match.
Signed-off-by: Longfang Liu liulongfang@huawei.com --- wd_cipher.c | 97 +++++++++++++++++++++++++---------------------------- 1 file changed, 45 insertions(+), 52 deletions(-)
diff --git a/wd_cipher.c b/wd_cipher.c index 239a55c..69bc63f 100644 --- a/wd_cipher.c +++ b/wd_cipher.c @@ -52,7 +52,6 @@ struct wd_cipher_setting { struct wd_ctx_config_internal config; struct wd_sched sched; struct wd_async_msg_pool pool; - struct wd_alg_driver *driver; void *priv; void *dlhandle; void *dlh_list; @@ -82,20 +81,16 @@ static void wd_cipher_close_driver(int init_type) }
if (wd_cipher_setting.dlhandle) { - wd_release_drv(wd_cipher_setting.driver); dlclose(wd_cipher_setting.dlhandle); wd_cipher_setting.dlhandle = NULL; } #else - wd_release_drv(wd_cipher_setting.driver); hisi_sec2_remove(); #endif }
static int wd_cipher_open_driver(int init_type) { - struct wd_alg_driver *driver = NULL; - const char *alg_name = "cbc(aes)"; #ifndef WD_STATIC_DRV char lib_path[PATH_MAX]; int ret; @@ -129,15 +124,6 @@ static int wd_cipher_open_driver(int init_type) if (init_type == WD_TYPE_V2) return WD_SUCCESS; #endif - driver = wd_request_drv(alg_name, false); - if (!driver) { - wd_cipher_close_driver(WD_TYPE_V1); - WD_ERR("failed to get %s driver support\n", alg_name); - return -WD_EINVAL; - } - - wd_cipher_setting.driver = driver; - return WD_SUCCESS; }
@@ -275,7 +261,7 @@ handle_t wd_cipher_alloc_sess(struct wd_cipher_sess_setup *setup) }
sess->alg_name = wd_cipher_alg_name[setup->alg][setup->mode]; - ret = wd_drv_alg_support(sess->alg_name, wd_cipher_setting.driver); + ret = wd_drv_alg_support(sess->alg_name, &wd_cipher_setting.config); if (!ret) { WD_ERR("failed to support this algorithm: %s!\n", sess->alg_name); goto err_sess; @@ -346,16 +332,10 @@ static int wd_cipher_common_init(struct wd_ctx_config *config, if (ret < 0) goto out_clear_sched;
- ret = wd_alg_init_driver(&wd_cipher_setting.config, - wd_cipher_setting.driver, - &wd_cipher_setting.priv); - if (ret) - goto out_clear_pool; + wd_cipher_setting.priv = STATUS_ENABLE;
return 0;
-out_clear_pool: - wd_uninit_async_request_pool(&wd_cipher_setting.pool); out_clear_sched: wd_clear_sched(&wd_cipher_setting.sched); out_clear_ctx_config: @@ -375,10 +355,7 @@ static int wd_cipher_common_uninit(void)
/* unset config, sched, driver */ wd_clear_sched(&wd_cipher_setting.sched); - - wd_alg_uninit_driver(&wd_cipher_setting.config, - wd_cipher_setting.driver, - &wd_cipher_setting.priv); + wd_cipher_setting.priv = NULL;
return 0; } @@ -405,10 +382,22 @@ int wd_cipher_init(struct wd_ctx_config *config, struct wd_sched *sched) if (ret) goto out_close_driver;
+ ret = wd_ctx_drv_config("ecb(aes)", &wd_cipher_setting.config); + if (ret) + goto out_uninit_nolock; + + ret = wd_alg_init_driver_nw(&wd_cipher_setting.config); + if (ret) + goto out_drv_deconfig; + wd_alg_set_init(&wd_cipher_setting.status);
return 0;
+out_drv_deconfig: + wd_ctx_drv_deconfig(&wd_cipher_setting.config); +out_uninit_nolock: + wd_cipher_common_uninit(); out_close_driver: wd_cipher_close_driver(WD_TYPE_V1); out_clear_init: @@ -420,6 +409,9 @@ void wd_cipher_uninit(void) { int ret;
+ wd_alg_uninit_driver_nw(&wd_cipher_setting.config); + wd_ctx_drv_deconfig(&wd_cipher_setting.config); + ret = wd_cipher_common_uninit(); if (ret) return; @@ -460,37 +452,26 @@ int wd_cipher_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_p while (ret != 0) { memset(&wd_cipher_setting.config, 0, sizeof(struct wd_ctx_config_internal));
- /* Get alg driver and dev name */ - wd_cipher_setting.driver = wd_alg_drv_bind(task_type, alg); - if (!wd_cipher_setting.driver) { - WD_ERR("failed to bind %s driver.\n", alg); - goto out_dlopen; - } - + /* Init ctx param and prepare for ctx request */ cipher_ctx_params.ctx_set_num = cipher_ctx_num; - ret = wd_ctx_param_init(&cipher_ctx_params, ctx_params, - wd_cipher_setting.driver, - WD_CIPHER_TYPE, WD_CIPHER_DECRYPTION + 1); + ret = wd_ctx_param_init_nw(&cipher_ctx_params, ctx_params, + alg, task_type, WD_CIPHER_TYPE, WD_CIPHER_DECRYPTION + 1); if (ret) { if (ret == -WD_EAGAIN) { - wd_disable_drv(wd_cipher_setting.driver); - wd_alg_drv_unbind(wd_cipher_setting.driver); continue; } - goto out_driver; + goto out_dlclose; }
wd_cipher_init_attrs.alg = alg; wd_cipher_init_attrs.sched_type = sched_type; - wd_cipher_init_attrs.driver = wd_cipher_setting.driver; + wd_cipher_init_attrs.task_type = task_type; wd_cipher_init_attrs.ctx_params = &cipher_ctx_params; wd_cipher_init_attrs.alg_init = wd_cipher_common_init; wd_cipher_init_attrs.alg_poll_ctx = wd_cipher_poll_ctx; ret = wd_alg_attrs_init(&wd_cipher_init_attrs); if (ret) { if (ret == -WD_ENODEV) { - wd_disable_drv(wd_cipher_setting.driver); - wd_alg_drv_unbind(wd_cipher_setting.driver); wd_ctx_param_uninit(&cipher_ctx_params); continue; } @@ -499,16 +480,28 @@ int wd_cipher_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_p } }
+ WD_ERR("ctxs numbers: %u.\n", wd_cipher_setting.config.ctx_num); + ret = wd_ctx_drv_config(alg, &wd_cipher_setting.config); + if (ret) + goto out_uninit_nolock; + + ret = wd_alg_init_driver_nw(&wd_cipher_setting.config); + if (ret) + goto out_drv_deconfig; + wd_alg_set_init(&wd_cipher_setting.status); wd_ctx_param_uninit(&cipher_ctx_params);
return 0;
+out_drv_deconfig: + wd_ctx_drv_deconfig(&wd_cipher_setting.config); +out_uninit_nolock: + wd_cipher_common_uninit(); + wd_alg_attrs_uninit(&wd_cipher_init_attrs); out_params_uninit: wd_ctx_param_uninit(&cipher_ctx_params); -out_driver: - wd_alg_drv_unbind(wd_cipher_setting.driver); -out_dlopen: +out_dlclose: wd_cipher_close_driver(WD_TYPE_V2); out_uninit: wd_alg_clear_init(&wd_cipher_setting.status); @@ -519,12 +512,12 @@ void wd_cipher_uninit2(void) { int ret;
+ wd_ctx_drv_deconfig(&wd_cipher_setting.config); ret = wd_cipher_common_uninit(); if (ret) return;
wd_alg_attrs_uninit(&wd_cipher_init_attrs); - wd_alg_drv_unbind(wd_cipher_setting.driver); wd_cipher_close_driver(WD_TYPE_V2); wd_cipher_setting.dlh_list = NULL; wd_alg_clear_init(&wd_cipher_setting.status); @@ -684,13 +677,13 @@ static int send_recv_sync(struct wd_ctx_internal *ctx, struct wd_msg_handle msg_handle; int ret;
- msg_handle.send = wd_cipher_setting.driver->send; - msg_handle.recv = wd_cipher_setting.driver->recv; + msg_handle.send = ctx->drv->send; + msg_handle.recv = ctx->drv->recv;
- wd_ctx_spin_lock(ctx, wd_cipher_setting.driver->calc_type); + wd_ctx_spin_lock(ctx, UADK_ALG_HW); ret = wd_handle_msg_sync(&msg_handle, ctx->ctx, msg, NULL, wd_cipher_setting.config.epoll_en); - wd_ctx_spin_unlock(ctx, wd_cipher_setting.driver->calc_type); + wd_ctx_spin_unlock(ctx, UADK_ALG_HW);
return ret; } @@ -764,7 +757,7 @@ int wd_do_cipher_async(handle_t h_sess, struct wd_cipher_req *req) fill_request_msg(msg, req, sess); msg->tag = msg_id;
- ret = wd_cipher_setting.driver->send(ctx->ctx, msg); + ret = ctx->drv->send(ctx->ctx, msg); if (unlikely(ret < 0)) { if (ret != -WD_EBUSY) WD_ERR("wd cipher async send err!\n"); @@ -813,7 +806,7 @@ int wd_cipher_poll_ctx(__u32 idx, __u32 expt, __u32 *count) ctx = config->ctxs + idx;
do { - ret = wd_cipher_setting.driver->recv(ctx->ctx, &resp_msg); + ret = ctx->drv->recv(ctx->ctx, &resp_msg); if (ret == -WD_EAGAIN) return ret; else if (ret < 0) {
After the uadk framework's heterogeneous scheduling update, the internal implementation of the comp algorithm needs to be adapted to match.
Signed-off-by: Longfang Liu liulongfang@huawei.com --- wd_comp.c | 84 ++++++++++++++++++++++++++----------------------------- 1 file changed, 40 insertions(+), 44 deletions(-)
diff --git a/wd_comp.c b/wd_comp.c index 4914350..52c711c 100644 --- a/wd_comp.c +++ b/wd_comp.c @@ -46,7 +46,6 @@ struct wd_comp_setting { struct wd_ctx_config_internal config; struct wd_sched sched; struct wd_async_msg_pool pool; - struct wd_alg_driver *driver; void *priv; void *dlhandle; void *dlh_list; @@ -64,20 +63,16 @@ static void wd_comp_close_driver(int init_type) }
if (wd_comp_setting.dlhandle) { - wd_release_drv(wd_comp_setting.driver); dlclose(wd_comp_setting.dlhandle); wd_comp_setting.dlhandle = NULL; } #else - wd_release_drv(wd_comp_setting.driver); hisi_zip_remove(); #endif }
static int wd_comp_open_driver(int init_type) { - struct wd_alg_driver *driver = NULL; - const char *alg_name = "zlib"; #ifndef WD_STATIC_DRV char lib_path[PATH_MAX]; int ret; @@ -111,14 +106,6 @@ static int wd_comp_open_driver(int init_type) if (init_type == WD_TYPE_V2) return WD_SUCCESS; #endif - driver = wd_request_drv(alg_name, false); - if (!driver) { - wd_comp_close_driver(WD_TYPE_V1); - WD_ERR("failed to get %s driver support\n", alg_name); - return -WD_EINVAL; - } - - wd_comp_setting.driver = driver;
return WD_SUCCESS; } @@ -166,16 +153,10 @@ static int wd_comp_init_nolock(struct wd_ctx_config *config, struct wd_sched *sc if (ret < 0) goto out_clear_sched;
- ret = wd_alg_init_driver(&wd_comp_setting.config, - wd_comp_setting.driver, - &wd_comp_setting.priv); - if (ret) - goto out_clear_pool; + wd_comp_setting.priv = STATUS_ENABLE;
return 0;
-out_clear_pool: - wd_uninit_async_request_pool(&wd_comp_setting.pool); out_clear_sched: wd_clear_sched(&wd_comp_setting.sched); out_clear_ctx_config: @@ -196,9 +177,7 @@ static int wd_comp_uninit_nolock(void) /* Unset config, sched, driver */ wd_clear_sched(&wd_comp_setting.sched);
- wd_alg_uninit_driver(&wd_comp_setting.config, - wd_comp_setting.driver, - &wd_comp_setting.priv); + wd_comp_setting.priv = NULL;
return 0; } @@ -225,10 +204,22 @@ int wd_comp_init(struct wd_ctx_config *config, struct wd_sched *sched) if (ret) goto out_clear_driver;
+ ret = wd_ctx_drv_config("zlib", &wd_comp_setting.config); + if (ret) + goto out_uninit_nolock; + + ret = wd_alg_init_driver_nw(&wd_comp_setting.config); + if (ret) + goto out_drv_deconfig; + wd_alg_set_init(&wd_comp_setting.status);
return 0;
+out_drv_deconfig: + wd_ctx_drv_deconfig(&wd_comp_setting.config); +out_uninit_nolock: + wd_comp_uninit_nolock(); out_clear_driver: wd_comp_close_driver(WD_TYPE_V1); out_clear_init: @@ -240,6 +231,9 @@ void wd_comp_uninit(void) { int ret;
+ wd_alg_uninit_driver_nw(&wd_comp_setting.config); + wd_ctx_drv_deconfig(&wd_comp_setting.config); + ret = wd_comp_uninit_nolock(); if (ret) return; @@ -280,36 +274,26 @@ int wd_comp_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_par while (ret != 0) { memset(&wd_comp_setting.config, 0, sizeof(struct wd_ctx_config_internal));
- /* Get alg driver and dev name */ - wd_comp_setting.driver = wd_alg_drv_bind(task_type, alg); - if (!wd_comp_setting.driver) { - WD_ERR("failed to bind %s driver.\n", alg); - goto out_dlclose; - } - + /* Init ctx param and prepare for ctx request */ comp_ctx_params.ctx_set_num = comp_ctx_num; - ret = wd_ctx_param_init(&comp_ctx_params, ctx_params, - wd_comp_setting.driver, WD_COMP_TYPE, WD_DIR_MAX); + ret = wd_ctx_param_init_nw(&comp_ctx_params, ctx_params, + alg, task_type, WD_COMP_TYPE, WD_DIR_MAX); if (ret) { if (ret == -WD_EAGAIN) { - wd_disable_drv(wd_comp_setting.driver); - wd_alg_drv_unbind(wd_comp_setting.driver); continue; } - goto out_unbind_drv; + goto out_dlclose; }
wd_comp_init_attrs.alg = alg; wd_comp_init_attrs.sched_type = sched_type; - wd_comp_init_attrs.driver = wd_comp_setting.driver; + wd_comp_init_attrs.task_type = task_type; wd_comp_init_attrs.ctx_params = &comp_ctx_params; wd_comp_init_attrs.alg_init = wd_comp_init_nolock; wd_comp_init_attrs.alg_poll_ctx = wd_comp_poll_ctx; ret = wd_alg_attrs_init(&wd_comp_init_attrs); if (ret) { if (ret == -WD_ENODEV) { - wd_disable_drv(wd_comp_setting.driver); - wd_alg_drv_unbind(wd_comp_setting.driver); wd_ctx_param_uninit(&comp_ctx_params); continue; } @@ -318,15 +302,26 @@ int wd_comp_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_par } }
+ ret = wd_ctx_drv_config(alg, &wd_comp_setting.config); + if (ret) + goto out_uninit_nolock; + + ret = wd_alg_init_driver_nw(&wd_comp_setting.config); + if (ret) + goto out_drv_deconfig; + wd_alg_set_init(&wd_comp_setting.status); wd_ctx_param_uninit(&comp_ctx_params);
return 0;
+out_drv_deconfig: + wd_ctx_drv_deconfig(&wd_comp_setting.config); +out_uninit_nolock: + wd_comp_uninit_nolock(); + wd_alg_attrs_uninit(&wd_comp_init_attrs); out_params_uninit: wd_ctx_param_uninit(&comp_ctx_params); -out_unbind_drv: - wd_alg_drv_unbind(wd_comp_setting.driver); out_dlclose: wd_comp_close_driver(WD_TYPE_V2); out_uninit: @@ -338,12 +333,13 @@ void wd_comp_uninit2(void) { int ret;
+ wd_ctx_drv_deconfig(&wd_comp_setting.config); ret = wd_comp_uninit_nolock(); if (ret) return;
wd_alg_attrs_uninit(&wd_comp_init_attrs); - wd_alg_drv_unbind(wd_comp_setting.driver); + wd_comp_close_driver(WD_TYPE_V2); wd_comp_setting.dlh_list = NULL; wd_alg_clear_init(&wd_comp_setting.status); @@ -379,7 +375,7 @@ int wd_comp_poll_ctx(__u32 idx, __u32 expt, __u32 *count) ctx = config->ctxs + idx;
do { - ret = wd_comp_setting.driver->recv(ctx->ctx, &resp_msg); + ret = ctx->drv->recv(ctx->ctx, &resp_msg); if (unlikely(ret < 0)) { if (ret == -WD_HW_EACCESS) WD_ERR("wd comp recv hw error!\n"); @@ -609,8 +605,8 @@ static int wd_comp_sync_job(struct wd_comp_sess *sess, wd_dfx_msg_cnt(config, WD_CTX_CNT_NUM, idx); ctx = config->ctxs + idx;
- msg_handle.send = wd_comp_setting.driver->send; - msg_handle.recv = wd_comp_setting.driver->recv; + msg_handle.send = ctx->drv->send; + msg_handle.recv = ctx->drv->recv;
pthread_spin_lock(&ctx->lock); ret = wd_handle_msg_sync(&msg_handle, ctx->ctx, msg,
After the uadk framework's heterogeneous scheduling update, the internal implementation of the dh algorithm needs to be adapted to match.
Signed-off-by: Longfang Liu liulongfang@huawei.com --- wd_dh.c | 87 ++++++++++++++++++++++++++------------------------------- 1 file changed, 40 insertions(+), 47 deletions(-)
diff --git a/wd_dh.c b/wd_dh.c index 82bbf7f..55cced7 100644 --- a/wd_dh.c +++ b/wd_dh.c @@ -33,7 +33,6 @@ static struct wd_dh_setting { struct wd_ctx_config_internal config; struct wd_sched sched; struct wd_async_msg_pool pool; - struct wd_alg_driver *driver; void *priv; void *dlhandle; void *dlh_list; @@ -53,19 +52,15 @@ static void wd_dh_close_driver(int init_type) if (!wd_dh_setting.dlhandle) return;
- wd_release_drv(wd_dh_setting.driver); dlclose(wd_dh_setting.dlhandle); wd_dh_setting.dlhandle = NULL; #else - wd_release_drv(wd_dh_setting.driver); hisi_hpre_remove(); #endif }
static int wd_dh_open_driver(int init_type) { - struct wd_alg_driver *driver = NULL; - const char *alg_name = "dh"; #ifndef WD_STATIC_DRV char lib_path[PATH_MAX]; int ret; @@ -99,14 +94,6 @@ static int wd_dh_open_driver(int init_type) if (init_type == WD_TYPE_V2) return WD_SUCCESS; #endif - driver = wd_request_drv(alg_name, false); - if (!driver) { - wd_dh_close_driver(WD_TYPE_V1); - WD_ERR("failed to get %s driver support\n", alg_name); - return -WD_EINVAL; - } - - wd_dh_setting.driver = driver;
return WD_SUCCESS; } @@ -140,16 +127,10 @@ static int wd_dh_common_init(struct wd_ctx_config *config, struct wd_sched *sche if (ret) goto out_clear_sched;
- ret = wd_alg_init_driver(&wd_dh_setting.config, - wd_dh_setting.driver, - &wd_dh_setting.priv); - if (ret) - goto out_clear_pool; + wd_dh_setting.priv = STATUS_ENABLE;
return WD_SUCCESS;
-out_clear_pool: - wd_uninit_async_request_pool(&wd_dh_setting.pool); out_clear_sched: wd_clear_sched(&wd_dh_setting.sched); out_clear_ctx_config: @@ -169,9 +150,7 @@ static int wd_dh_common_uninit(void)
/* unset config, sched, driver */ wd_clear_sched(&wd_dh_setting.sched); - wd_alg_uninit_driver(&wd_dh_setting.config, - wd_dh_setting.driver, - &wd_dh_setting.priv); + wd_dh_setting.priv = NULL;
return WD_SUCCESS; } @@ -198,10 +177,22 @@ int wd_dh_init(struct wd_ctx_config *config, struct wd_sched *sched) if (ret) goto out_close_driver;
+ ret = wd_ctx_drv_config("dh", &wd_dh_setting.config); + if (ret) + goto out_uninit_nolock; + + ret = wd_alg_init_driver_nw(&wd_dh_setting.config); + if (ret) + goto out_drv_deconfig; + wd_alg_set_init(&wd_dh_setting.status);
return WD_SUCCESS;
+out_drv_deconfig: + wd_ctx_drv_deconfig(&wd_dh_setting.config); +out_uninit_nolock: + wd_dh_common_uninit(); out_close_driver: wd_dh_close_driver(WD_TYPE_V1); out_clear_init: @@ -217,6 +208,9 @@ void wd_dh_uninit(void) if (ret) return;
+ wd_alg_uninit_driver_nw(&wd_dh_setting.config); + wd_ctx_drv_deconfig(&wd_dh_setting.config); + wd_dh_close_driver(WD_TYPE_V1); wd_alg_clear_init(&wd_dh_setting.status); } @@ -251,37 +245,25 @@ int wd_dh_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_param while (ret) { memset(&wd_dh_setting.config, 0, sizeof(struct wd_ctx_config_internal));
- /* Get alg driver and dev name */ - wd_dh_setting.driver = wd_alg_drv_bind(task_type, alg); - if (!wd_dh_setting.driver) { - WD_ERR("fail to bind a valid driver.\n"); - ret = -WD_EINVAL; - goto out_dlopen; - } - + /* Init ctx param and prepare for ctx request */ dh_ctx_params.ctx_set_num = dh_ctx_num; - ret = wd_ctx_param_init(&dh_ctx_params, ctx_params, - wd_dh_setting.driver, WD_DH_TYPE, WD_DH_PHASE2); + ret = wd_ctx_param_init_nw(&dh_ctx_params, ctx_params, + alg, task_type, WD_DH_TYPE, WD_DH_PHASE2); if (ret) { - if (ret == -WD_EAGAIN) { - wd_disable_drv(wd_dh_setting.driver); - wd_alg_drv_unbind(wd_dh_setting.driver); + if (ret == -WD_EAGAIN) continue; - } goto out_driver; }
wd_dh_init_attrs.alg = alg; wd_dh_init_attrs.sched_type = sched_type; - wd_dh_init_attrs.driver = wd_dh_setting.driver; + wd_dh_init_attrs.task_type = task_type; wd_dh_init_attrs.ctx_params = &dh_ctx_params; wd_dh_init_attrs.alg_init = wd_dh_common_init; wd_dh_init_attrs.alg_poll_ctx = wd_dh_poll_ctx; ret = wd_alg_attrs_init(&wd_dh_init_attrs); if (ret) { if (ret == -WD_ENODEV) { - wd_disable_drv(wd_dh_setting.driver); - wd_alg_drv_unbind(wd_dh_setting.driver); wd_ctx_param_uninit(&dh_ctx_params); continue; } @@ -290,16 +272,27 @@ int wd_dh_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_param } }
+ ret = wd_ctx_drv_config(alg, &wd_dh_setting.config); + if (ret) + goto out_uninit_nolock; + + ret = wd_alg_init_driver_nw(&wd_dh_setting.config); + if (ret) + goto out_drv_deconfig; + wd_alg_set_init(&wd_dh_setting.status); wd_ctx_param_uninit(&dh_ctx_params);
return WD_SUCCESS;
+out_drv_deconfig: + wd_ctx_drv_deconfig(&wd_dh_setting.config); +out_uninit_nolock: + wd_dh_common_uninit(); + wd_alg_attrs_uninit(&wd_dh_init_attrs); out_params_uninit: wd_ctx_param_uninit(&dh_ctx_params); out_driver: - wd_alg_drv_unbind(wd_dh_setting.driver); -out_dlopen: wd_dh_close_driver(WD_TYPE_V2); out_clear_init: wd_alg_clear_init(&wd_dh_setting.status); @@ -314,8 +307,8 @@ void wd_dh_uninit2(void) if (ret) return;
+ wd_ctx_drv_deconfig(&wd_dh_setting.config); wd_alg_attrs_uninit(&wd_dh_init_attrs); - wd_alg_drv_unbind(wd_dh_setting.driver); wd_dh_close_driver(WD_TYPE_V2); wd_dh_setting.dlh_list = NULL; wd_alg_clear_init(&wd_dh_setting.status); @@ -383,8 +376,8 @@ int wd_do_dh_sync(handle_t sess, struct wd_dh_req *req) if (unlikely(ret)) return ret;
- msg_handle.send = wd_dh_setting.driver->send; - msg_handle.recv = wd_dh_setting.driver->recv; + msg_handle.send = ctx->drv->send; + msg_handle.recv = ctx->drv->recv;
pthread_spin_lock(&ctx->lock); ret = wd_handle_msg_sync(&msg_handle, ctx->ctx, &msg, &balance, @@ -434,7 +427,7 @@ int wd_do_dh_async(handle_t sess, struct wd_dh_req *req) goto fail_with_msg; msg->tag = mid;
- ret = wd_dh_setting.driver->send(ctx->ctx, msg); + ret = ctx->drv->send(ctx->ctx, msg); if (unlikely(ret)) { if (ret != -WD_EBUSY) WD_ERR("failed to send dh BD, hw is err!\n"); @@ -485,7 +478,7 @@ int wd_dh_poll_ctx(__u32 idx, __u32 expt, __u32 *count) ctx = config->ctxs + idx;
do { - ret = wd_dh_setting.driver->recv(ctx->ctx, &rcv_msg); + ret = ctx->drv->recv(ctx->ctx, &rcv_msg); if (ret == -WD_EAGAIN) { return ret; } else if (unlikely(ret)) {
After the uadk framework's heterogeneous scheduling update, the internal implementation of the digest algorithm needs to be adapted to the new scheduling interfaces.
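For illustration, a minimal caller-side sketch of the reworked init2_ path is shown below. The entry points wd_digest_init2_(), wd_digest_alloc_sess(), wd_do_digest_sync(), wd_digest_free_sess() and wd_digest_uninit2() are the ones touched by this series; the include path, passing NULL ctx_params to fall back to library defaults, and the session/request field values are assumptions made for the sketch, not part of this patch.

    #include "wd_digest.h"

    static int sm3_one_shot(void *in, __u32 in_len, void *out)
    {
            struct wd_digest_sess_setup setup = {0};
            struct wd_digest_req req = {0};
            handle_t h_sess;
            int ret;

            /* task_type now selects the scheduling domain (plain hardware here) */
            ret = wd_digest_init2_("sm3", SCHED_POLICY_RR, TASK_HW, NULL);
            if (ret)
                    return ret;

            setup.alg = WD_DIGEST_SM3;              /* assumed digest type enum */
            h_sess = wd_digest_alloc_sess(&setup);
            if (!h_sess) {
                    ret = -WD_EINVAL;
                    goto out_uninit;
            }

            req.in = in;
            req.in_bytes = in_len;
            req.out = out;
            req.out_bytes = 32;                     /* SM3 digest length */
            ret = wd_do_digest_sync(h_sess, &req);

            wd_digest_free_sess(h_sess);
    out_uninit:
            wd_digest_uninit2();
            return ret;
    }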
Signed-off-by: Longfang Liu liulongfang@huawei.com --- wd_digest.c | 106 +++++++++++++++++++++++++--------------------------- 1 file changed, 50 insertions(+), 56 deletions(-)
diff --git a/wd_digest.c b/wd_digest.c index 379974a..e8113a7 100644 --- a/wd_digest.c +++ b/wd_digest.c @@ -40,7 +40,6 @@ struct wd_digest_setting { enum wd_status status; struct wd_ctx_config_internal config; struct wd_sched sched; - struct wd_alg_driver *driver; struct wd_async_msg_pool pool; void *priv; void *dlhandle; @@ -83,20 +82,16 @@ static void wd_digest_close_driver(int init_type) }
if (wd_digest_setting.dlhandle) { - wd_release_drv(wd_digest_setting.driver); dlclose(wd_digest_setting.dlhandle); wd_digest_setting.dlhandle = NULL; } #else - wd_release_drv(wd_digest_setting.driver); hisi_sec2_remove(); #endif }
static int wd_digest_open_driver(int init_type) { - struct wd_alg_driver *driver = NULL; - const char *alg_name = "sm3"; #ifndef WD_STATIC_DRV char lib_path[PATH_MAX]; int ret; @@ -130,14 +125,6 @@ static int wd_digest_open_driver(int init_type) if (init_type == WD_TYPE_V2) return WD_SUCCESS; #endif - driver = wd_request_drv(alg_name, false); - if (!driver) { - wd_digest_close_driver(WD_TYPE_V1); - WD_ERR("failed to get %s driver support\n", alg_name); - return -WD_EINVAL; - } - - wd_digest_setting.driver = driver;
return WD_SUCCESS; } @@ -210,7 +197,7 @@ handle_t wd_digest_alloc_sess(struct wd_digest_sess_setup *setup) sess->alg_name = wd_digest_alg_name[setup->alg]; sess->alg = setup->alg; sess->mode = setup->mode; - ret = wd_drv_alg_support(sess->alg_name, wd_digest_setting.driver); + ret = wd_drv_alg_support(sess->alg_name, &wd_digest_setting.config); if (!ret) { WD_ERR("failed to support this algorithm: %s!\n", sess->alg_name); goto err_sess; @@ -277,16 +264,10 @@ static int wd_digest_init_nolock(struct wd_ctx_config *config, if (ret < 0) goto out_clear_sched;
- ret = wd_alg_init_driver(&wd_digest_setting.config, - wd_digest_setting.driver, - &wd_digest_setting.priv); - if (ret) - goto out_clear_pool; + wd_digest_setting.priv = STATUS_ENABLE;
return 0;
-out_clear_pool: - wd_uninit_async_request_pool(&wd_digest_setting.pool); out_clear_sched: wd_clear_sched(&wd_digest_setting.sched); out_clear_ctx_config: @@ -295,6 +276,13 @@ out_clear_ctx_config: return ret; }
+static void wd_digest_uninit_nolock(void) +{ + wd_uninit_async_request_pool(&wd_digest_setting.pool); + wd_clear_sched(&wd_digest_setting.sched); + wd_digest_setting.priv = NULL; +} + int wd_digest_init(struct wd_ctx_config *config, struct wd_sched *sched) { int ret; @@ -317,10 +305,22 @@ int wd_digest_init(struct wd_ctx_config *config, struct wd_sched *sched) if (ret) goto out_close_driver;
+ ret = wd_ctx_drv_config("sm3", &wd_digest_setting.config); + if (ret) + goto out_uninit_nolock; + + ret = wd_alg_init_driver_nw(&wd_digest_setting.config); + if (ret) + goto out_drv_deconfig; + wd_alg_set_init(&wd_digest_setting.status);
return 0;
+out_drv_deconfig: + wd_ctx_drv_deconfig(&wd_digest_setting.config); +out_uninit_nolock: + wd_digest_uninit_nolock(); out_close_driver: wd_digest_close_driver(WD_TYPE_V1); out_clear_init: @@ -328,20 +328,14 @@ out_clear_init: return ret; }
-static void wd_digest_uninit_nolock(void) -{ - wd_uninit_async_request_pool(&wd_digest_setting.pool); - wd_clear_sched(&wd_digest_setting.sched); - wd_alg_uninit_driver(&wd_digest_setting.config, - wd_digest_setting.driver, - &wd_digest_setting.priv); -} - void wd_digest_uninit(void) { if (!wd_digest_setting.priv) return;
+ wd_alg_uninit_driver_nw(&wd_digest_setting.config); + wd_ctx_drv_deconfig(&wd_digest_setting.config); + wd_digest_uninit_nolock(); wd_digest_close_driver(WD_TYPE_V1); wd_alg_clear_init(&wd_digest_setting.status); @@ -389,36 +383,26 @@ int wd_digest_init2_(char *alg, __u32 sched_type, int task_type, while (ret != 0) { memset(&wd_digest_setting.config, 0, sizeof(struct wd_ctx_config_internal));
- /* Get alg driver and dev name */ - wd_digest_setting.driver = wd_alg_drv_bind(task_type, alg); - if (!wd_digest_setting.driver) { - WD_ERR("failed to bind %s driver.\n", alg); - goto out_dlopen; - } - + /* Init ctx param and prepare for ctx request */ digest_ctx_params.ctx_set_num = &digest_ctx_num; - ret = wd_ctx_param_init(&digest_ctx_params, ctx_params, - wd_digest_setting.driver, WD_DIGEST_TYPE, 1); + ret = wd_ctx_param_init_nw(&digest_ctx_params, ctx_params, + alg, task_type, WD_DIGEST_TYPE, 1); if (ret) { if (ret == -WD_EAGAIN) { - wd_disable_drv(wd_digest_setting.driver); - wd_alg_drv_unbind(wd_digest_setting.driver); continue; } - goto out_driver; + goto out_dlclose; }
wd_digest_init_attrs.alg = alg; wd_digest_init_attrs.sched_type = sched_type; - wd_digest_init_attrs.driver = wd_digest_setting.driver; + wd_digest_init_attrs.task_type = task_type; wd_digest_init_attrs.ctx_params = &digest_ctx_params; wd_digest_init_attrs.alg_init = wd_digest_init_nolock; wd_digest_init_attrs.alg_poll_ctx = wd_digest_poll_ctx; ret = wd_alg_attrs_init(&wd_digest_init_attrs); if (ret) { if (ret == -WD_ENODEV) { - wd_disable_drv(wd_digest_setting.driver); - wd_alg_drv_unbind(wd_digest_setting.driver); wd_ctx_param_uninit(&digest_ctx_params); continue; } @@ -426,16 +410,27 @@ int wd_digest_init2_(char *alg, __u32 sched_type, int task_type, goto out_params_uninit; } } + ret = wd_ctx_drv_config(alg, &wd_digest_setting.config); + if (ret) + goto out_uninit_nolock; + + ret = wd_alg_init_driver_nw(&wd_digest_setting.config); + if (ret) + goto out_drv_deconfig; + wd_alg_set_init(&wd_digest_setting.status); wd_ctx_param_uninit(&digest_ctx_params);
return 0;
+out_drv_deconfig: + wd_ctx_drv_deconfig(&wd_digest_setting.config); +out_uninit_nolock: + wd_digest_uninit_nolock(); + wd_alg_attrs_uninit(&wd_digest_init_attrs); out_params_uninit: wd_ctx_param_uninit(&digest_ctx_params); -out_driver: - wd_alg_drv_unbind(wd_digest_setting.driver); -out_dlopen: +out_dlclose: wd_digest_close_driver(WD_TYPE_V2); out_uninit: wd_alg_clear_init(&wd_digest_setting.status); @@ -447,9 +442,9 @@ void wd_digest_uninit2(void) if (!wd_digest_setting.priv) return;
+ wd_ctx_drv_deconfig(&wd_digest_setting.config); wd_digest_uninit_nolock(); wd_alg_attrs_uninit(&wd_digest_init_attrs); - wd_alg_drv_unbind(wd_digest_setting.driver); wd_digest_close_driver(WD_TYPE_V2); wd_digest_setting.dlh_list = NULL; wd_alg_clear_init(&wd_digest_setting.status); @@ -606,13 +601,13 @@ static int send_recv_sync(struct wd_ctx_internal *ctx, struct wd_digest_sess *ds struct wd_msg_handle msg_handle; int ret;
- msg_handle.send = wd_digest_setting.driver->send; - msg_handle.recv = wd_digest_setting.driver->recv; + msg_handle.send = ctx->drv->send; + msg_handle.recv = ctx->drv->recv;
- wd_ctx_spin_lock(ctx, wd_digest_setting.driver->calc_type); + wd_ctx_spin_lock(ctx, UADK_ALG_HW); ret = wd_handle_msg_sync(&msg_handle, ctx->ctx, msg, NULL, wd_digest_setting.config.epoll_en); - wd_ctx_spin_unlock(ctx, wd_digest_setting.driver->calc_type); + wd_ctx_spin_unlock(ctx, UADK_ALG_HW); if (unlikely(ret)) return ret;
@@ -705,7 +700,7 @@ int wd_do_digest_async(handle_t h_sess, struct wd_digest_req *req) fill_request_msg(msg, req, dsess); msg->tag = msg_id;
- ret = wd_digest_setting.driver->send(ctx->ctx, msg); + ret = ctx->drv->send(ctx->ctx, msg); if (unlikely(ret < 0)) { if (ret != -WD_EBUSY) WD_ERR("failed to send BD, hw is err!\n"); @@ -754,8 +749,7 @@ int wd_digest_poll_ctx(__u32 idx, __u32 expt, __u32 *count) ctx = config->ctxs + idx;
do { - ret = wd_digest_setting.driver->recv(ctx->ctx, - &recv_msg); + ret = ctx->drv->recv(ctx->ctx, &recv_msg); if (ret == -WD_EAGAIN) { return ret; } else if (ret < 0) {
After the uadk framework's heterogeneous scheduling update, the internal implementation of the ECC algorithm needs to be adapted to the new scheduling interfaces.
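Because the diff interleaves several hunks, the resulting control flow of wd_ecc_init() is easier to read condensed, as sketched below. The calls and labels are taken from the hunks further down; the only assumption is that the wd_ecc_common_init() call precedes the "goto out_close_driver" context line, as it does in the other algorithm modules.

    ret = wd_ecc_common_init(config, sched);        /* pool + sched only, no driver */
    if (ret)
            goto out_close_driver;

    ret = wd_ctx_drv_config("sm2", &wd_ecc_setting.config);
    if (ret)
            goto out_uninit_nolock;

    ret = wd_alg_init_driver_nw(&wd_ecc_setting.config);
    if (ret)
            goto out_drv_deconfig;

    wd_alg_set_init(&wd_ecc_setting.status);
    return WD_SUCCESS;

    out_drv_deconfig:
            wd_ctx_drv_deconfig(&wd_ecc_setting.config);
    out_uninit_nolock:
            wd_ecc_common_uninit();
    out_close_driver:
            wd_ecc_close_driver(WD_TYPE_V1);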
Signed-off-by: Longfang Liu liulongfang@huawei.com --- wd_ecc.c | 89 ++++++++++++++++++++++++++------------------------------ 1 file changed, 41 insertions(+), 48 deletions(-)
diff --git a/wd_ecc.c b/wd_ecc.c index 80a2679..e63d66d 100644 --- a/wd_ecc.c +++ b/wd_ecc.c @@ -64,7 +64,6 @@ static struct wd_ecc_setting { struct wd_ctx_config_internal config; struct wd_sched sched; struct wd_async_msg_pool pool; - struct wd_alg_driver *driver; void *priv; void *dlhandle; void *dlh_list; @@ -107,19 +106,15 @@ static void wd_ecc_close_driver(int init_type) if (!wd_ecc_setting.dlhandle) return;
- wd_release_drv(wd_ecc_setting.driver); dlclose(wd_ecc_setting.dlhandle); wd_ecc_setting.dlhandle = NULL; #else - wd_release_drv(wd_ecc_setting.driver); hisi_hpre_remove(); #endif }
static int wd_ecc_open_driver(int init_type) { - struct wd_alg_driver *driver = NULL; - const char *alg_name = "sm2"; #ifndef WD_STATIC_DRV char lib_path[PATH_MAX]; int ret; @@ -153,14 +148,6 @@ static int wd_ecc_open_driver(int init_type) if (init_type == WD_TYPE_V2) return WD_SUCCESS; #endif - driver = wd_request_drv(alg_name, false); - if (!driver) { - wd_ecc_close_driver(WD_TYPE_V1); - WD_ERR("failed to get %s driver support\n", alg_name); - return -WD_EINVAL; - } - - wd_ecc_setting.driver = driver;
return WD_SUCCESS; } @@ -203,16 +190,10 @@ static int wd_ecc_common_init(struct wd_ctx_config *config, struct wd_sched *sch if (ret < 0) goto out_clear_sched;
- ret = wd_alg_init_driver(&wd_ecc_setting.config, - wd_ecc_setting.driver, - &wd_ecc_setting.priv); - if (ret) - goto out_clear_pool; + wd_ecc_setting.priv = STATUS_ENABLE;
return WD_SUCCESS;
-out_clear_pool: - wd_uninit_async_request_pool(&wd_ecc_setting.pool); out_clear_sched: wd_clear_sched(&wd_ecc_setting.sched); out_clear_ctx_config: @@ -232,9 +213,7 @@ static int wd_ecc_common_uninit(void)
/* unset config, sched, driver */ wd_clear_sched(&wd_ecc_setting.sched); - wd_alg_uninit_driver(&wd_ecc_setting.config, - wd_ecc_setting.driver, - &wd_ecc_setting.priv); + wd_ecc_setting.priv = NULL;
return WD_SUCCESS; } @@ -261,10 +240,22 @@ int wd_ecc_init(struct wd_ctx_config *config, struct wd_sched *sched) if (ret) goto out_close_driver;
+ ret = wd_ctx_drv_config("sm2", &wd_ecc_setting.config); + if (ret) + goto out_uninit_nolock; + + ret = wd_alg_init_driver_nw(&wd_ecc_setting.config); + if (ret) + goto out_drv_deconfig; + wd_alg_set_init(&wd_ecc_setting.status);
return WD_SUCCESS;
+out_drv_deconfig: + wd_ctx_drv_deconfig(&wd_ecc_setting.config); +out_uninit_nolock: + wd_ecc_common_uninit(); out_close_driver: wd_ecc_close_driver(WD_TYPE_V1); out_clear_init: @@ -276,6 +267,8 @@ void wd_ecc_uninit(void) { int ret;
+ wd_alg_uninit_driver_nw(&wd_ecc_setting.config); + wd_ctx_drv_deconfig(&wd_ecc_setting.config); ret = wd_ecc_common_uninit(); if (ret) return; @@ -316,37 +309,26 @@ int wd_ecc_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_para while (ret) { memset(&wd_ecc_setting.config, 0, sizeof(struct wd_ctx_config_internal));
- /* Get alg driver and dev name */ - wd_ecc_setting.driver = wd_alg_drv_bind(task_type, alg); - if (!wd_ecc_setting.driver) { - WD_ERR("failed to bind a valid driver!\n"); - ret = -WD_EINVAL; - goto out_dlopen; - } - + /* Init ctx param and prepare for ctx request */ ecc_ctx_params.ctx_set_num = ecc_ctx_num; - ret = wd_ctx_param_init(&ecc_ctx_params, ctx_params, - wd_ecc_setting.driver, WD_ECC_TYPE, WD_EC_OP_MAX); + ret = wd_ctx_param_init_nw(&ecc_ctx_params, ctx_params, + alg, task_type, WD_ECC_TYPE, WD_EC_OP_MAX); if (ret) { - if (ret == -WD_EAGAIN) { - wd_disable_drv(wd_ecc_setting.driver); - wd_alg_drv_unbind(wd_ecc_setting.driver); + if (ret == -WD_EAGAIN) continue; - } + goto out_driver; }
wd_ecc_init_attrs.alg = alg; wd_ecc_init_attrs.sched_type = sched_type; - wd_ecc_init_attrs.driver = wd_ecc_setting.driver; + wd_ecc_init_attrs.task_type = task_type; wd_ecc_init_attrs.ctx_params = &ecc_ctx_params; wd_ecc_init_attrs.alg_init = wd_ecc_common_init; wd_ecc_init_attrs.alg_poll_ctx = wd_ecc_poll_ctx; ret = wd_alg_attrs_init(&wd_ecc_init_attrs); if (ret) { if (ret == -WD_ENODEV) { - wd_disable_drv(wd_ecc_setting.driver); - wd_alg_drv_unbind(wd_ecc_setting.driver); wd_ctx_param_uninit(&ecc_ctx_params); continue; } @@ -355,16 +337,27 @@ int wd_ecc_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_para } }
+ ret = wd_ctx_drv_config(alg, &wd_ecc_setting.config); + if (ret) + goto out_uninit_nolock; + + ret = wd_alg_init_driver_nw(&wd_ecc_setting.config); + if (ret) + goto out_drv_deconfig; + wd_alg_set_init(&wd_ecc_setting.status); wd_ctx_param_uninit(&ecc_ctx_params);
return WD_SUCCESS;
+out_drv_deconfig: + wd_ctx_drv_deconfig(&wd_ecc_setting.config); +out_uninit_nolock: + wd_ecc_common_uninit(); + wd_alg_attrs_uninit(&wd_ecc_init_attrs); out_params_uninit: wd_ctx_param_uninit(&ecc_ctx_params); out_driver: - wd_alg_drv_unbind(wd_ecc_setting.driver); -out_dlopen: wd_ecc_close_driver(WD_TYPE_V2); out_clear_init: wd_alg_clear_init(&wd_ecc_setting.status); @@ -375,12 +368,12 @@ void wd_ecc_uninit2(void) { int ret;
+ wd_ctx_drv_deconfig(&wd_ecc_setting.config); ret = wd_ecc_common_uninit(); if (ret) return;
wd_alg_attrs_uninit(&wd_ecc_init_attrs); - wd_alg_drv_unbind(wd_ecc_setting.driver); wd_ecc_close_driver(WD_TYPE_V2); wd_ecc_setting.dlh_list = NULL; wd_alg_clear_init(&wd_ecc_setting.status); @@ -1176,7 +1169,7 @@ handle_t wd_ecc_alloc_sess(struct wd_ecc_sess_setup *setup) if (setup_param_check(setup)) return (handle_t)0;
- ret = wd_drv_alg_support(setup->alg, wd_ecc_setting.driver); + ret = wd_drv_alg_support(setup->alg, &wd_ecc_setting.config); if (!ret) { WD_ERR("failed to support this algorithm: %s!\n", setup->alg); return (handle_t)0; @@ -1575,8 +1568,8 @@ int wd_do_ecc_sync(handle_t h_sess, struct wd_ecc_req *req) if (unlikely(ret)) return ret;
- msg_handle.send = wd_ecc_setting.driver->send; - msg_handle.recv = wd_ecc_setting.driver->recv; + msg_handle.send = ctx->drv->send; + msg_handle.recv = ctx->drv->recv;
pthread_spin_lock(&ctx->lock); ret = wd_handle_msg_sync(&msg_handle, ctx->ctx, &msg, &balance, @@ -2267,7 +2260,7 @@ int wd_do_ecc_async(handle_t sess, struct wd_ecc_req *req) goto fail_with_msg; msg->tag = mid;
- ret = wd_ecc_setting.driver->send(ctx->ctx, msg); + ret = ctx->drv->send(ctx->ctx, msg); if (unlikely(ret)) { if (ret != -WD_EBUSY) WD_ERR("failed to send ecc BD, hw is err!\n"); @@ -2316,7 +2309,7 @@ int wd_ecc_poll_ctx(__u32 idx, __u32 expt, __u32 *count) ctx = config->ctxs + idx;
do { - ret = wd_ecc_setting.driver->recv(ctx->ctx, &recv_msg); + ret = ctx->drv->recv(ctx->ctx, &recv_msg); if (ret == -WD_EAGAIN) { return ret; } else if (ret < 0) {
After the uadk framework's heterogeneous scheduling update, the internal implementation of the RSA algorithm needs to be adapted to the new scheduling interfaces.
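The data-path change mirrors the other algorithms: the module no longer keeps a single global driver, so send/recv go through the driver bound to whichever internal ctx the scheduler picked. The before/after below is condensed from the hunks further down; ctx is the struct wd_ctx_internal selected for the request.

    /* before: one module-wide driver */
    msg_handle.send = wd_rsa_setting.driver->send;
    msg_handle.recv = wd_rsa_setting.driver->recv;

    /* after: the driver attached to the scheduled ctx */
    msg_handle.send = ctx->drv->send;
    msg_handle.recv = ctx->drv->recv;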
Signed-off-by: Longfang Liu liulongfang@huawei.com --- wd_rsa.c | 87 ++++++++++++++++++++++++++------------------------------ 1 file changed, 40 insertions(+), 47 deletions(-)
diff --git a/wd_rsa.c b/wd_rsa.c index b1458da..80acf58 100644 --- a/wd_rsa.c +++ b/wd_rsa.c @@ -74,7 +74,6 @@ static struct wd_rsa_setting { struct wd_ctx_config_internal config; struct wd_sched sched; struct wd_async_msg_pool pool; - struct wd_alg_driver *driver; void *priv; void *dlhandle; void *dlh_list; @@ -94,19 +93,15 @@ static void wd_rsa_close_driver(int init_type) if (!wd_rsa_setting.dlhandle) return;
- wd_release_drv(wd_rsa_setting.driver); dlclose(wd_rsa_setting.dlhandle); wd_rsa_setting.dlhandle = NULL; #else - wd_release_drv(wd_rsa_setting.driver); hisi_hpre_remove(); #endif }
static int wd_rsa_open_driver(int init_type) { - struct wd_alg_driver *driver = NULL; - const char *alg_name = "rsa"; #ifndef WD_STATIC_DRV char lib_path[PATH_MAX]; int ret; @@ -140,14 +135,6 @@ static int wd_rsa_open_driver(int init_type) if (init_type == WD_TYPE_V2) return WD_SUCCESS; #endif - driver = wd_request_drv(alg_name, false); - if (!driver) { - wd_rsa_close_driver(WD_TYPE_V1); - WD_ERR("failed to get %s driver support!\n", alg_name); - return -WD_EINVAL; - } - - wd_rsa_setting.driver = driver;
return WD_SUCCESS; } @@ -180,16 +167,10 @@ static int wd_rsa_common_init(struct wd_ctx_config *config, struct wd_sched *sch if (ret < 0) goto out_clear_sched;
- ret = wd_alg_init_driver(&wd_rsa_setting.config, - wd_rsa_setting.driver, - &wd_rsa_setting.priv); - if (ret) - goto out_clear_pool; + wd_rsa_setting.priv = STATUS_ENABLE;
return WD_SUCCESS;
-out_clear_pool: - wd_uninit_async_request_pool(&wd_rsa_setting.pool); out_clear_sched: wd_clear_sched(&wd_rsa_setting.sched); out_clear_ctx_config: @@ -209,9 +190,7 @@ static int wd_rsa_common_uninit(void)
/* unset config, sched, driver */ wd_clear_sched(&wd_rsa_setting.sched); - wd_alg_uninit_driver(&wd_rsa_setting.config, - wd_rsa_setting.driver, - &wd_rsa_setting.priv); + wd_rsa_setting.priv = NULL;
return WD_SUCCESS; } @@ -238,10 +217,22 @@ int wd_rsa_init(struct wd_ctx_config *config, struct wd_sched *sched) if (ret) goto out_close_driver;
+ ret = wd_ctx_drv_config("rsa", &wd_rsa_setting.config); + if (ret) + goto out_uninit_nolock; + + ret = wd_alg_init_driver_nw(&wd_rsa_setting.config); + if (ret) + goto out_drv_deconfig; + wd_alg_set_init(&wd_rsa_setting.status);
return WD_SUCCESS;
+out_drv_deconfig: + wd_ctx_drv_deconfig(&wd_rsa_setting.config); +out_uninit_nolock: + wd_rsa_common_uninit(); out_close_driver: wd_rsa_close_driver(WD_TYPE_V1); out_clear_init: @@ -253,6 +244,8 @@ void wd_rsa_uninit(void) { int ret;
+ wd_alg_uninit_driver_nw(&wd_rsa_setting.config); + wd_ctx_drv_deconfig(&wd_rsa_setting.config); ret = wd_rsa_common_uninit(); if (ret) return; @@ -291,37 +284,26 @@ int wd_rsa_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_para while (ret) { memset(&wd_rsa_setting.config, 0, sizeof(struct wd_ctx_config_internal));
- /* Get alg driver and dev name */ - wd_rsa_setting.driver = wd_alg_drv_bind(task_type, alg); - if (!wd_rsa_setting.driver) { - WD_ERR("failed to bind a valid driver!\n"); - ret = -WD_EINVAL; - goto out_dlopen; - } - + /* Init ctx param and prepare for ctx request */ rsa_ctx_params.ctx_set_num = rsa_ctx_num; - ret = wd_ctx_param_init(&rsa_ctx_params, ctx_params, - wd_rsa_setting.driver, WD_RSA_TYPE, WD_RSA_GENKEY); + ret = wd_ctx_param_init_nw(&rsa_ctx_params, ctx_params, + alg, task_type, WD_RSA_TYPE, WD_RSA_GENKEY); if (ret) { - if (ret == -WD_EAGAIN) { - wd_disable_drv(wd_rsa_setting.driver); - wd_alg_drv_unbind(wd_rsa_setting.driver); + if (ret == -WD_EAGAIN) continue; - } + goto out_driver; }
wd_rsa_init_attrs.alg = alg; wd_rsa_init_attrs.sched_type = sched_type; - wd_rsa_init_attrs.driver = wd_rsa_setting.driver; + wd_rsa_init_attrs.task_type = task_type; wd_rsa_init_attrs.ctx_params = &rsa_ctx_params; wd_rsa_init_attrs.alg_init = wd_rsa_common_init; wd_rsa_init_attrs.alg_poll_ctx = wd_rsa_poll_ctx; ret = wd_alg_attrs_init(&wd_rsa_init_attrs); if (ret) { if (ret == -WD_ENODEV) { - wd_disable_drv(wd_rsa_setting.driver); - wd_alg_drv_unbind(wd_rsa_setting.driver); wd_ctx_param_uninit(&rsa_ctx_params); continue; } @@ -330,16 +312,27 @@ int wd_rsa_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_para } }
+ ret = wd_ctx_drv_config(alg, &wd_rsa_setting.config); + if (ret) + goto out_uninit_nolock; + + ret = wd_alg_init_driver_nw(&wd_rsa_setting.config); + if (ret) + goto out_drv_deconfig; + wd_alg_set_init(&wd_rsa_setting.status); wd_ctx_param_uninit(&rsa_ctx_params);
return WD_SUCCESS;
+out_drv_deconfig: + wd_ctx_drv_deconfig(&wd_rsa_setting.config); +out_uninit_nolock: + wd_rsa_common_uninit(); + wd_alg_attrs_uninit(&wd_rsa_init_attrs); out_params_uninit: wd_ctx_param_uninit(&rsa_ctx_params); out_driver: - wd_alg_drv_unbind(wd_rsa_setting.driver); -out_dlopen: wd_rsa_close_driver(WD_TYPE_V2); out_clear_init: wd_alg_clear_init(&wd_rsa_setting.status); @@ -350,12 +343,12 @@ void wd_rsa_uninit2(void) { int ret;
+ wd_ctx_drv_deconfig(&wd_rsa_setting.config); ret = wd_rsa_common_uninit(); if (ret) return;
wd_alg_attrs_uninit(&wd_rsa_init_attrs); - wd_alg_drv_unbind(wd_rsa_setting.driver); wd_rsa_close_driver(WD_TYPE_V2); wd_rsa_setting.dlh_list = NULL; wd_alg_clear_init(&wd_rsa_setting.status); @@ -444,8 +437,8 @@ int wd_do_rsa_sync(handle_t h_sess, struct wd_rsa_req *req) if (unlikely(ret)) return ret;
- msg_handle.send = wd_rsa_setting.driver->send; - msg_handle.recv = wd_rsa_setting.driver->recv; + msg_handle.send = ctx->drv->send; + msg_handle.recv = ctx->drv->recv;
pthread_spin_lock(&ctx->lock); ret = wd_handle_msg_sync(&msg_handle, ctx->ctx, &msg, &balance, @@ -495,7 +488,7 @@ int wd_do_rsa_async(handle_t sess, struct wd_rsa_req *req) goto fail_with_msg; msg->tag = mid;
- ret = wd_rsa_setting.driver->send(ctx->ctx, msg); + ret = ctx->drv->send(ctx->ctx, msg); if (unlikely(ret)) { if (ret != -WD_EBUSY) WD_ERR("failed to send rsa BD, hw is err!\n"); @@ -544,7 +537,7 @@ int wd_rsa_poll_ctx(__u32 idx, __u32 expt, __u32 *count) ctx = config->ctxs + idx;
do { - ret = wd_rsa_setting.driver->recv(ctx->ctx, &recv_msg); + ret = ctx->drv->recv(ctx->ctx, &recv_msg); if (ret == -WD_EAGAIN) { return ret; } else if (ret < 0) {
With all uadk algorithms now adapted to the heterogeneous scheduling framework, the obsolete transitional functions are no longer needed and are removed.
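For reference, these are the helper prototypes that remain in include/wd_util.h once the transitional _nw variants take over the original names (gathered from the header hunk below):

    int wd_ctx_param_init(struct wd_ctx_params *ctx_params,
                          struct wd_ctx_params *user_ctx_params,
                          char *alg, int task_type,
                          enum wd_type type, int max_op_type);
    void wd_ctx_param_uninit(struct wd_ctx_params *ctx_params);

    int wd_alg_init_driver(struct wd_ctx_config_internal *config);
    void wd_alg_uninit_driver(struct wd_ctx_config_internal *config);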
Signed-off-by: Longfang Liu liulongfang@huawei.com --- include/wd_util.h | 15 +++------------ wd_aead.c | 8 ++++---- wd_agg.c | 6 +++--- wd_cipher.c | 8 ++++---- wd_comp.c | 8 ++++---- wd_dh.c | 8 ++++---- wd_digest.c | 8 ++++---- wd_ecc.c | 8 ++++---- wd_rsa.c | 8 ++++---- wd_util.c | 23 ++--------------------- 10 files changed, 36 insertions(+), 64 deletions(-)
diff --git a/include/wd_util.h b/include/wd_util.h index ff9aa02..2850341 100644 --- a/include/wd_util.h +++ b/include/wd_util.h @@ -131,7 +131,6 @@ struct wd_init_attrs { struct wd_ctx_config *ctx_config; wd_alg_init alg_init; wd_alg_poll_ctx alg_poll_ctx; - struct wd_alg_driver *driver; //stub for old code };
/* @@ -454,14 +453,10 @@ static inline void wd_alg_clear_init(enum wd_status *status) * * Return 0 if succeed and other error number if fail. */ -int wd_ctx_param_init_nw(struct wd_ctx_params *ctx_params, +int wd_ctx_param_init(struct wd_ctx_params *ctx_params, struct wd_ctx_params *user_ctx_params, char *alg, int task_type, enum wd_type type, int max_op_type); -int wd_ctx_param_init(struct wd_ctx_params *ctx_params, - struct wd_ctx_params *user_ctx_params, - struct wd_alg_driver *driver, - enum wd_type type, int max_op_type);
void wd_ctx_param_uninit(struct wd_ctx_params *ctx_params);
@@ -495,12 +490,8 @@ void wd_alg_drv_unbind(struct wd_alg_driver *drv); * * Return 0 if succeed and other error number if fail. */ -int wd_alg_init_driver_nw(struct wd_ctx_config_internal *config); -void wd_alg_uninit_driver_nw(struct wd_ctx_config_internal *config); -int wd_alg_init_driver(struct wd_ctx_config_internal *config, - struct wd_alg_driver *driver, void **drv_priv); -void wd_alg_uninit_driver(struct wd_ctx_config_internal *config, - struct wd_alg_driver *driver, void **drv_priv); +int wd_alg_init_driver(struct wd_ctx_config_internal *config); +void wd_alg_uninit_driver(struct wd_ctx_config_internal *config);
/** * wd_dlopen_drv() - Open the dynamic library file of the device driver. diff --git a/wd_aead.c b/wd_aead.c index 3448144..5525e72 100644 --- a/wd_aead.c +++ b/wd_aead.c @@ -496,7 +496,7 @@ int wd_aead_init(struct wd_ctx_config *config, struct wd_sched *sched) if (ret) goto out_uninit_nolock;
- ret = wd_alg_init_driver_nw(&wd_aead_setting.config); + ret = wd_alg_init_driver(&wd_aead_setting.config); if (ret) goto out_drv_deconfig; wd_alg_set_init(&wd_aead_setting.status); @@ -519,7 +519,7 @@ void wd_aead_uninit(void) if (!wd_aead_setting.priv) return;
- wd_alg_uninit_driver_nw(&wd_aead_setting.config); + wd_alg_uninit_driver(&wd_aead_setting.config); wd_ctx_drv_deconfig(&wd_aead_setting.config);
wd_aead_uninit_nolock(); @@ -573,7 +573,7 @@ int wd_aead_init2_(char *alg, __u32 sched_type, int task_type, memset(&wd_aead_setting.config, 0, sizeof(struct wd_ctx_config_internal)); /* Init ctx param and prepare for ctx request */ aead_ctx_params.ctx_set_num = aead_ctx_num; - ret = wd_ctx_param_init_nw(&aead_ctx_params, ctx_params, + ret = wd_ctx_param_init(&aead_ctx_params, ctx_params, alg, task_type, WD_AEAD_TYPE, WD_DIGEST_CIPHER_DECRYPTION + 1); if (ret) { if (ret == -WD_EAGAIN) { @@ -602,7 +602,7 @@ int wd_aead_init2_(char *alg, __u32 sched_type, int task_type, if (ret) goto out_uninit_nolock;
- ret = wd_alg_init_driver_nw(&wd_aead_setting.config); + ret = wd_alg_init_driver(&wd_aead_setting.config); if (ret) goto out_drv_deconfig;
diff --git a/wd_agg.c b/wd_agg.c index 5cf21d3..26613ef 100644 --- a/wd_agg.c +++ b/wd_agg.c @@ -673,7 +673,7 @@ int wd_agg_init(char *alg, __u32 sched_type, int task_type, struct wd_ctx_params
/* Init ctx param and prepare for ctx request */ agg_ctx_params.ctx_set_num = &agg_ctx_num; - ret = wd_ctx_param_init_nw(&agg_ctx_params, ctx_params, alg, task_type, + ret = wd_ctx_param_init(&agg_ctx_params, ctx_params, alg, task_type, WD_AGG_TYPE, 1); if (ret) { if (ret == -WD_EAGAIN) { @@ -702,7 +702,7 @@ int wd_agg_init(char *alg, __u32 sched_type, int task_type, struct wd_ctx_params if (ret) goto out_uninit_nolock;
- ret = wd_alg_init_driver_nw(&wd_agg_setting.config); + ret = wd_alg_init_driver(&wd_agg_setting.config); if (ret) goto out_drv_deconfig;
@@ -733,7 +733,7 @@ void wd_agg_uninit(void) if (ret) return;
- wd_alg_uninit_driver_nw(&wd_agg_setting.config); + wd_alg_uninit_driver(&wd_agg_setting.config); wd_ctx_drv_deconfig(&wd_agg_setting.config);
wd_agg_close_driver(); diff --git a/wd_cipher.c b/wd_cipher.c index 69bc63f..8c34985 100644 --- a/wd_cipher.c +++ b/wd_cipher.c @@ -386,7 +386,7 @@ int wd_cipher_init(struct wd_ctx_config *config, struct wd_sched *sched) if (ret) goto out_uninit_nolock;
- ret = wd_alg_init_driver_nw(&wd_cipher_setting.config); + ret = wd_alg_init_driver(&wd_cipher_setting.config); if (ret) goto out_drv_deconfig;
@@ -409,7 +409,7 @@ void wd_cipher_uninit(void) { int ret;
- wd_alg_uninit_driver_nw(&wd_cipher_setting.config); + wd_alg_uninit_driver(&wd_cipher_setting.config); wd_ctx_drv_deconfig(&wd_cipher_setting.config);
ret = wd_cipher_common_uninit(); @@ -454,7 +454,7 @@ int wd_cipher_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_p
/* Init ctx param and prepare for ctx request */ cipher_ctx_params.ctx_set_num = cipher_ctx_num; - ret = wd_ctx_param_init_nw(&cipher_ctx_params, ctx_params, + ret = wd_ctx_param_init(&cipher_ctx_params, ctx_params, alg, task_type, WD_CIPHER_TYPE, WD_CIPHER_DECRYPTION + 1); if (ret) { if (ret == -WD_EAGAIN) { @@ -485,7 +485,7 @@ int wd_cipher_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_p if (ret) goto out_uninit_nolock;
- ret = wd_alg_init_driver_nw(&wd_cipher_setting.config); + ret = wd_alg_init_driver(&wd_cipher_setting.config); if (ret) goto out_drv_deconfig;
diff --git a/wd_comp.c b/wd_comp.c index 52c711c..0e1be5f 100644 --- a/wd_comp.c +++ b/wd_comp.c @@ -208,7 +208,7 @@ int wd_comp_init(struct wd_ctx_config *config, struct wd_sched *sched) if (ret) goto out_uninit_nolock;
- ret = wd_alg_init_driver_nw(&wd_comp_setting.config); + ret = wd_alg_init_driver(&wd_comp_setting.config); if (ret) goto out_drv_deconfig;
@@ -231,7 +231,7 @@ void wd_comp_uninit(void) { int ret;
- wd_alg_uninit_driver_nw(&wd_comp_setting.config); + wd_alg_uninit_driver(&wd_comp_setting.config); wd_ctx_drv_deconfig(&wd_comp_setting.config);
ret = wd_comp_uninit_nolock(); @@ -276,7 +276,7 @@ int wd_comp_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_par
/* Init ctx param and prepare for ctx request */ comp_ctx_params.ctx_set_num = comp_ctx_num; - ret = wd_ctx_param_init_nw(&comp_ctx_params, ctx_params, + ret = wd_ctx_param_init(&comp_ctx_params, ctx_params, alg, task_type, WD_COMP_TYPE, WD_DIR_MAX); if (ret) { if (ret == -WD_EAGAIN) { @@ -306,7 +306,7 @@ int wd_comp_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_par if (ret) goto out_uninit_nolock;
- ret = wd_alg_init_driver_nw(&wd_comp_setting.config); + ret = wd_alg_init_driver(&wd_comp_setting.config); if (ret) goto out_drv_deconfig;
diff --git a/wd_dh.c b/wd_dh.c index 55cced7..907876d 100644 --- a/wd_dh.c +++ b/wd_dh.c @@ -181,7 +181,7 @@ int wd_dh_init(struct wd_ctx_config *config, struct wd_sched *sched) if (ret) goto out_uninit_nolock;
- ret = wd_alg_init_driver_nw(&wd_dh_setting.config); + ret = wd_alg_init_driver(&wd_dh_setting.config); if (ret) goto out_drv_deconfig;
@@ -208,7 +208,7 @@ void wd_dh_uninit(void) if (ret) return;
- wd_alg_uninit_driver_nw(&wd_dh_setting.config); + wd_alg_uninit_driver(&wd_dh_setting.config); wd_ctx_drv_deconfig(&wd_dh_setting.config);
wd_dh_close_driver(WD_TYPE_V1); @@ -247,7 +247,7 @@ int wd_dh_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_param
/* Init ctx param and prepare for ctx request */ dh_ctx_params.ctx_set_num = dh_ctx_num; - ret = wd_ctx_param_init_nw(&dh_ctx_params, ctx_params, + ret = wd_ctx_param_init(&dh_ctx_params, ctx_params, alg, task_type, WD_DH_TYPE, WD_DH_PHASE2); if (ret) { if (ret == -WD_EAGAIN) @@ -276,7 +276,7 @@ int wd_dh_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_param if (ret) goto out_uninit_nolock;
- ret = wd_alg_init_driver_nw(&wd_dh_setting.config); + ret = wd_alg_init_driver(&wd_dh_setting.config); if (ret) goto out_drv_deconfig;
diff --git a/wd_digest.c b/wd_digest.c index e8113a7..10d075d 100644 --- a/wd_digest.c +++ b/wd_digest.c @@ -309,7 +309,7 @@ int wd_digest_init(struct wd_ctx_config *config, struct wd_sched *sched) if (ret) goto out_uninit_nolock;
- ret = wd_alg_init_driver_nw(&wd_digest_setting.config); + ret = wd_alg_init_driver(&wd_digest_setting.config); if (ret) goto out_drv_deconfig;
@@ -333,7 +333,7 @@ void wd_digest_uninit(void) if (!wd_digest_setting.priv) return;
- wd_alg_uninit_driver_nw(&wd_digest_setting.config); + wd_alg_uninit_driver(&wd_digest_setting.config); wd_ctx_drv_deconfig(&wd_digest_setting.config);
wd_digest_uninit_nolock(); @@ -385,7 +385,7 @@ int wd_digest_init2_(char *alg, __u32 sched_type, int task_type,
/* Init ctx param and prepare for ctx request */ digest_ctx_params.ctx_set_num = &digest_ctx_num; - ret = wd_ctx_param_init_nw(&digest_ctx_params, ctx_params, + ret = wd_ctx_param_init(&digest_ctx_params, ctx_params, alg, task_type, WD_DIGEST_TYPE, 1); if (ret) { if (ret == -WD_EAGAIN) { @@ -414,7 +414,7 @@ int wd_digest_init2_(char *alg, __u32 sched_type, int task_type, if (ret) goto out_uninit_nolock;
- ret = wd_alg_init_driver_nw(&wd_digest_setting.config); + ret = wd_alg_init_driver(&wd_digest_setting.config); if (ret) goto out_drv_deconfig;
diff --git a/wd_ecc.c b/wd_ecc.c index e63d66d..146f3ac 100644 --- a/wd_ecc.c +++ b/wd_ecc.c @@ -244,7 +244,7 @@ int wd_ecc_init(struct wd_ctx_config *config, struct wd_sched *sched) if (ret) goto out_uninit_nolock;
- ret = wd_alg_init_driver_nw(&wd_ecc_setting.config); + ret = wd_alg_init_driver(&wd_ecc_setting.config); if (ret) goto out_drv_deconfig;
@@ -267,7 +267,7 @@ void wd_ecc_uninit(void) { int ret;
- wd_alg_uninit_driver_nw(&wd_ecc_setting.config); + wd_alg_uninit_driver(&wd_ecc_setting.config); wd_ctx_drv_deconfig(&wd_ecc_setting.config); ret = wd_ecc_common_uninit(); if (ret) @@ -311,7 +311,7 @@ int wd_ecc_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_para
/* Init ctx param and prepare for ctx request */ ecc_ctx_params.ctx_set_num = ecc_ctx_num; - ret = wd_ctx_param_init_nw(&ecc_ctx_params, ctx_params, + ret = wd_ctx_param_init(&ecc_ctx_params, ctx_params, alg, task_type, WD_ECC_TYPE, WD_EC_OP_MAX); if (ret) { if (ret == -WD_EAGAIN) @@ -341,7 +341,7 @@ int wd_ecc_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_para if (ret) goto out_uninit_nolock;
- ret = wd_alg_init_driver_nw(&wd_ecc_setting.config); + ret = wd_alg_init_driver(&wd_ecc_setting.config); if (ret) goto out_drv_deconfig;
diff --git a/wd_rsa.c b/wd_rsa.c index 80acf58..5f012c9 100644 --- a/wd_rsa.c +++ b/wd_rsa.c @@ -221,7 +221,7 @@ int wd_rsa_init(struct wd_ctx_config *config, struct wd_sched *sched) if (ret) goto out_uninit_nolock;
- ret = wd_alg_init_driver_nw(&wd_rsa_setting.config); + ret = wd_alg_init_driver(&wd_rsa_setting.config); if (ret) goto out_drv_deconfig;
@@ -244,7 +244,7 @@ void wd_rsa_uninit(void) { int ret;
- wd_alg_uninit_driver_nw(&wd_rsa_setting.config); + wd_alg_uninit_driver(&wd_rsa_setting.config); wd_ctx_drv_deconfig(&wd_rsa_setting.config); ret = wd_rsa_common_uninit(); if (ret) @@ -286,7 +286,7 @@ int wd_rsa_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_para
/* Init ctx param and prepare for ctx request */ rsa_ctx_params.ctx_set_num = rsa_ctx_num; - ret = wd_ctx_param_init_nw(&rsa_ctx_params, ctx_params, + ret = wd_ctx_param_init(&rsa_ctx_params, ctx_params, alg, task_type, WD_RSA_TYPE, WD_RSA_GENKEY); if (ret) { if (ret == -WD_EAGAIN) @@ -316,7 +316,7 @@ int wd_rsa_init2_(char *alg, __u32 sched_type, int task_type, struct wd_ctx_para if (ret) goto out_uninit_nolock;
- ret = wd_alg_init_driver_nw(&wd_rsa_setting.config); + ret = wd_alg_init_driver(&wd_rsa_setting.config); if (ret) goto out_drv_deconfig;
diff --git a/wd_util.c b/wd_util.c index bf6894b..b3c155d 100644 --- a/wd_util.c +++ b/wd_util.c @@ -2066,13 +2066,7 @@ static void wd_ctx_uninit_driver(struct wd_ctx_config_internal *config, } }
-int wd_alg_init_driver(struct wd_ctx_config_internal *config, - struct wd_alg_driver *driver, void **drv_priv) -{ - return 0; -} - -int wd_alg_init_driver_nw(struct wd_ctx_config_internal *config) +int wd_alg_init_driver(struct wd_ctx_config_internal *config) { __u32 i, j; int ret; @@ -2095,12 +2089,7 @@ init_err: return ret; }
-void wd_alg_uninit_driver(struct wd_ctx_config_internal *config, - struct wd_alg_driver *driver, void **drv_priv) -{ -} - -void wd_alg_uninit_driver_nw(struct wd_ctx_config_internal *config) +void wd_alg_uninit_driver(struct wd_ctx_config_internal *config) { __u32 i;
@@ -2231,14 +2220,6 @@ void wd_ctx_param_uninit(struct wd_ctx_params *ctx_params) }
int wd_ctx_param_init(struct wd_ctx_params *ctx_params, - struct wd_ctx_params *user_ctx_params, - struct wd_alg_driver *driver, - enum wd_type type, int max_op_type) -{ - return 0; -} - -int wd_ctx_param_init_nw(struct wd_ctx_params *ctx_params, struct wd_ctx_params *user_ctx_params, char *alg, int task_type, enum wd_type type, int max_op_type)
Update the uadk test tool to adapt to the heterogeneous scheduling function.
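A condensed sketch of the heterogeneous ctx setup added to init_ctx_config2() is shown below. The struct fields, UADK_CTX_CE_INS and the SCHED_POLICY_HUNGRY/TASK_MIX constants come from this series; the ctx counts, the single op type and the node mask handling are placeholder assumptions for the sketch.

    struct wd_ctx_nums hw_ctx = { .sync_ctx_num = 2, .async_ctx_num = 2 };
    struct wd_ctx_nums ce_ctx = {
            .sync_ctx_num  = 2,
            .async_ctx_num = 2,
            .ctx_prop      = UADK_CTX_CE_INS,       /* CE-instruction ctx set */
    };
    struct wd_cap_config cap = { .ctx_msg_num = 1024 };
    struct wd_ctx_params params = { .op_type_num = 1 };
    int ret;

    hw_ctx.other_ctx = &ce_ctx;                     /* chain the CE set behind the HW set */

    params.ctx_set_num = &hw_ctx;
    params.cap = &cap;
    params.bmp = numa_allocate_nodemask();
    numa_bitmask_setall(params.bmp);

    /* TASK_MIX lets the scheduler dispatch to either ctx set */
    ret = wd_digest_init2_("sm3", SCHED_POLICY_HUNGRY, TASK_MIX, &params);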
Signed-off-by: Longfang Liu liulongfang@huawei.com --- uadk_tool/benchmark/sec_uadk_benchmark.c | 71 +++++++++++++----------- 1 file changed, 39 insertions(+), 32 deletions(-)
diff --git a/uadk_tool/benchmark/sec_uadk_benchmark.c b/uadk_tool/benchmark/sec_uadk_benchmark.c index 469ef8b..032293b 100644 --- a/uadk_tool/benchmark/sec_uadk_benchmark.c +++ b/uadk_tool/benchmark/sec_uadk_benchmark.c @@ -793,13 +793,15 @@ static void uninit_ctx_config2(int subtype) } }
+struct wd_ctx_nums ctx_set_num[2]; +struct wd_ctx_nums ce_ctx_set_num[2]; +struct wd_cap_config cap; + static int init_ctx_config2(struct acc_option *options) { - struct wd_ctx_params cparams = {0}; - struct wd_ctx_nums *ctx_set_num; int subtype = options->subtype; - int mode = options->syncmode; char alg_name[MAX_ALG_NAME]; + struct wd_ctx_params ctx_params = {0}; int ret;
ret = get_alg_name(options->algtype, alg_name); @@ -808,32 +810,36 @@ static int init_ctx_config2(struct acc_option *options) return -EINVAL; }
- ctx_set_num = calloc(1, sizeof(*ctx_set_num)); - if (!ctx_set_num) { - WD_ERR("failed to alloc ctx_set_size!\n"); - return -WD_ENOMEM; - } - - cparams.op_type_num = 1; - cparams.ctx_set_num = ctx_set_num; - cparams.bmp = numa_allocate_nodemask(); - if (!cparams.bmp) { - WD_ERR("failed to create nodemask!\n"); - ret = -WD_ENOMEM; - goto out_freectx; - } - - numa_bitmask_setall(cparams.bmp); - - if (mode == CTX_MODE_SYNC) - ctx_set_num->sync_ctx_num = g_ctxnum; - else - ctx_set_num->async_ctx_num = g_ctxnum; + cap.ctx_msg_num = 1024; + // HW ctx set + ctx_set_num[0].sync_ctx_num = options->ctxnums; + ctx_set_num[0].async_ctx_num = options->ctxnums; + ctx_set_num[1].sync_ctx_num = options->ctxnums; + ctx_set_num[1].async_ctx_num = options->ctxnums; + // CE ctx set + ce_ctx_set_num[0].sync_ctx_num = options->ctxnums; + ce_ctx_set_num[0].async_ctx_num = options->ctxnums; + ce_ctx_set_num[0].ctx_prop = UADK_CTX_CE_INS; + ce_ctx_set_num[0].other_ctx = NULL; + ce_ctx_set_num[1].sync_ctx_num = options->ctxnums; + ce_ctx_set_num[1].async_ctx_num = options->ctxnums; + ce_ctx_set_num[1].ctx_prop = UADK_CTX_CE_INS; + ce_ctx_set_num[1].other_ctx = NULL; + + ctx_set_num[0].other_ctx = &ce_ctx_set_num[0]; + ctx_set_num[1].other_ctx = &ce_ctx_set_num[1]; + + ctx_params.op_type_num = 2; + ctx_params.bmp = numa_allocate_nodemask(); + numa_bitmask_setbit(ctx_params.bmp, 0); + numa_bitmask_setbit(ctx_params.bmp, 1); + ctx_params.cap = &cap; + ctx_params.ctx_set_num = &ctx_set_num[0];
/* init */ switch(subtype) { case CIPHER_TYPE: - ret = wd_cipher_init2_(alg_name, SCHED_POLICY_RR, TASK_HW, &cparams); + ret = wd_cipher_init2_(alg_name, SCHED_POLICY_HUNGRY, TASK_MIX, &ctx_params); if (ret) SEC_TST_PRT("failed to do cipher init2!\n"); break; @@ -843,26 +849,23 @@ static int init_ctx_config2(struct acc_option *options) SEC_TST_PRT("failed to do cipher intruction init2!\n"); break; case AEAD_TYPE: - ret = wd_aead_init2_(alg_name, SCHED_POLICY_RR, TASK_HW, &cparams); + ret = wd_aead_init2_(alg_name, SCHED_POLICY_RR, TASK_HW, &ctx_params); if (ret) SEC_TST_PRT("failed to do aead init2!\n"); break; case DIGEST_TYPE: - ret = wd_digest_init2_(alg_name, options->sched_type, options->task_type, &cparams); + ctx_params.op_type_num = 1; + ret = wd_digest_init2_(alg_name, SCHED_POLICY_HUNGRY, TASK_MIX, &ctx_params); if (ret) SEC_TST_PRT("failed to do digest init2!\n"); break; } if (ret) { - SEC_TST_PRT("failed to do cipher init2!\n"); + SEC_TST_PRT("failed to do sec init2!\n"); return ret; }
-out_freectx: - free(ctx_set_num); - return ret; - }
static void get_aead_data(u8 *addr, u32 size) @@ -1182,6 +1185,9 @@ static void *sec_uadk_poll2(void *data) case DIGEST_TYPE: uadk_poll_policy = wd_digest_poll; break; + case CIPHER_INSTR_TYPE: + uadk_poll_policy = wd_cipher_poll; + break; default: SEC_TST_PRT("<<<<<<async poll interface is NULL!\n"); return NULL; @@ -1744,6 +1750,7 @@ int sec_uadk_async_threads(struct acc_option *options)
switch (options->subtype) { case CIPHER_TYPE: + case CIPHER_INSTR_TYPE: uadk_sec_async_run = sec_uadk_cipher_async; break; case AEAD_TYPE: