Adjust scheduler queue processing after unified scheduling
When the msg pool of a hardware ctx runs out, continuing to return
-WD_EBUSY stalls async senders. Instead, slice the overflow to the
soft (CE) fallback driver: wd_do_cipher_async() routes the next eight
requests through ctx->drv->fallback, completes them synchronously via
req->cb, and counts them in ctx->req_nums so that wd_cipher_poll_ctx()
can credit them to the caller. The session scheduler learns a fallback
ctx (fb_ctxid) from the CE ctx regions and alternates picks between
the HW ctx and the fallback ctx.

Signed-off-by: Longfang Liu <liulongfang@huawei.com>
---
 include/wd_alg_common.h |  1 +
 wd_cipher.c             | 51 ++++++++++++++++++++++++++++++-----------
 wd_sched.c              | 48 ++++++++++++++++++++++++++++++++++----
 wd_util.c               |  2 ++
 4 files changed, 85 insertions(+), 17 deletions(-)
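Note: the submit-side slicing added in wd_cipher.c reduces to a small
state machine. Below is a minimal standalone sketch (hypothetical names
and a fake msg pool, not the UADK API) of how a pool-exhausted send
flips the path into a burst of soft sends; it compiles with plain GCC:

	#include <stdio.h>

	#define SOFT_BURST	8

	/* Fake msg pool: ids 0..2 succeed, then the pool is exhausted. */
	static int get_msg(int nr)
	{
		return nr < 3 ? nr : -1;
	}

	/* Mirrors the try_again/soft_slice control flow in wd_do_cipher_async(). */
	static const char *submit(int nr)
	{
		static int soft_slice;

	try_again:
		if (soft_slice > 0) {
			soft_slice--;
			return "soft";		/* would go to the fallback driver */
		}

		if (get_msg(nr) >= 0)
			return "hw";		/* would go to the hardware ctx */

		soft_slice = SOFT_BURST;	/* pool busy: start a soft burst */
		goto try_again;
	}

	int main(void)
	{
		for (int i = 0; i < 6; i++)
			printf("req %d -> %s\n", i, submit(i));
		return 0;
	}

Requests 0-2 go to "hw"; request 3 exhausts the fake pool and starts
the burst, so requests 3-5 go to "soft".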
diff --git a/include/wd_alg_common.h b/include/wd_alg_common.h
index 90ddc956..75b3bfd9 100644
--- a/include/wd_alg_common.h
+++ b/include/wd_alg_common.h
@@ -160,6 +160,7 @@ struct wd_ctx_internal {
 	__u16 sqn;
 	pthread_spinlock_t lock;
 	__u8 ctx_type;
+	__u32 req_nums;
 	struct wd_alg_driver *drv;
 	void *drv_priv;
 };
diff --git a/wd_cipher.c b/wd_cipher.c
index 75dbb216..282604a1 100644
--- a/wd_cipher.c
+++ b/wd_cipher.c
@@ -729,6 +729,7 @@ int wd_do_cipher_async(handle_t h_sess, struct wd_cipher_req *req)
 	struct wd_cipher_sess *sess = (struct wd_cipher_sess *)h_sess;
 	struct wd_ctx_internal *ctx;
 	struct wd_cipher_msg *msg;
+	static int soft_slice;
 	int msg_id, ret;
 	__u32 idx;
@@ -747,22 +748,41 @@ int wd_do_cipher_async(handle_t h_sess, struct wd_cipher_req *req)
 	ctx = config->ctxs + idx;
 
-	msg_id = wd_get_msg_from_pool(&wd_cipher_setting.pool, idx,
-				      (void **)&msg);
-	if (unlikely(msg_id < 0)) {
-		WD_ERR("busy, failed to get msg from pool!\n");
-		return -WD_EBUSY;
-	}
+try_again:
+	if (soft_slice > 0) {
+		struct wd_alg_driver *fb_drv;
+		struct wd_cipher_msg msg;
+
+		soft_slice--;
+		fill_request_msg(&msg, req, sess);
+		fb_drv = (struct wd_alg_driver *)ctx->drv->fallback;
+		ret = fb_drv->send(ctx->ctx, &msg);
+		if (unlikely(ret < 0)) {
+			WD_ERR("wd cipher soft async send err!\n");
+			return ret;
+		}
 
-	fill_request_msg(msg, req, sess);
-	msg->tag = msg_id;
+		/* The soft path completes synchronously: run the callback now */
+		req->cb(req, req->cb_param);
+		__atomic_add_fetch(&ctx->req_nums, 0x1, __ATOMIC_RELAXED);
+	} else {
+		msg_id = wd_get_msg_from_pool(&wd_cipher_setting.pool, idx,
+					      (void **)&msg);
+		if (likely(msg_id >= 0)) {
+			fill_request_msg(msg, req, sess);
+			msg->tag = msg_id;
 
-	ret = ctx->drv->send(ctx->ctx, msg);
-	if (unlikely(ret < 0)) {
-		if (ret != -WD_EBUSY)
-			WD_ERR("wd cipher async send err!\n");
+			ret = ctx->drv->send(ctx->ctx, msg);
+			if (unlikely(ret < 0)) {
+				if (ret != -WD_EBUSY)
+					WD_ERR("wd cipher async send err!\n");
 
-		goto fail_with_msg;
+				goto fail_with_msg;
+			}
+		} else {
+			/* msg pool exhausted: slice a burst to the soft path */
+			soft_slice = 8;
+			goto try_again;
+		}
+	}
 
 	wd_dfx_msg_cnt(config, WD_CTX_CNT_NUM, idx);
@@ -832,6 +852,11 @@ int wd_cipher_poll_ctx(__u32 idx, __u32 expt, __u32 *count)
 		*count = recv_count;
 	} while (--tmp);
 
+	/* Credit requests completed by the soft fallback driver */
+	recv_count = __atomic_load_n(&ctx->req_nums, __ATOMIC_RELAXED);
+	__atomic_sub_fetch(&ctx->req_nums, recv_count, __ATOMIC_RELAXED);
+	*count += recv_count;
+
 	return ret;
 }
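Note: the ctx->req_nums flow above is a relaxed produce/consume counter
pair. A minimal standalone sketch (hypothetical names; the same GCC
__atomic builtins as the patch, not the UADK API):

	#include <stdio.h>

	/* Stand-in for struct wd_ctx_internal::req_nums. */
	static unsigned int req_nums;

	/* Completion path: one soft-fallback request finished synchronously. */
	static void soft_complete(void)
	{
		__atomic_add_fetch(&req_nums, 1, __ATOMIC_RELAXED);
	}

	/* Poll path: claim everything completed so far and credit it. */
	static void poll_credit(unsigned int *count)
	{
		unsigned int done = __atomic_load_n(&req_nums, __ATOMIC_RELAXED);

		__atomic_sub_fetch(&req_nums, done, __ATOMIC_RELAXED);
		*count += done;
	}

	int main(void)
	{
		unsigned int count = 0;

		soft_complete();
		soft_complete();
		poll_credit(&count);
		printf("credited %u requests\n", count);	/* prints 2 */
		return 0;
	}

Completions that land between the load and the subtraction are
preserved: only the amount that was read is subtracted.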
diff --git a/wd_sched.c b/wd_sched.c
index ddce9f43..f0deb5f2 100644
--- a/wd_sched.c
+++ b/wd_sched.c
@@ -33,6 +33,7 @@ struct sched_key {
 	__u8 mode;
 	__u32 sync_ctxid;
 	__u32 async_ctxid;
+	__u32 fb_ctxid;
 };
 
 /*
@@ -168,6 +169,31 @@ static __u32 session_sched_init_ctx(struct wd_sched_ctx *sched_ctx, struct sched
 	return sched_get_next_pos_rr(region, NULL);
 }
+static __u32 session_sched_fb_ctx(struct wd_sched_ctx *sched_ctx, struct sched_key *key,
+				  const int sched_mode)
+{
+	struct sched_ctx_region *region = NULL;
+	struct wd_sched_info *sched_info;
+	int numa_id;
+
+	sched_info = sched_ctx->sched_info;
+	if (key->numa_id >= 0 &&
+	    sched_info[key->numa_id].ce_ctx_region[key->mode][key->type].valid) {
+		region = &sched_info[key->numa_id].ce_ctx_region[key->mode][key->type];
+		return sched_get_next_pos_rr(region, NULL);
+	}
+
+	/* If key->numa_id has no valid region, scan the other NUMA nodes */
+	for (numa_id = 0; numa_id < sched_ctx->numa_num; numa_id++) {
+		if (sched_info[numa_id].ce_ctx_region[key->mode][key->type].valid) {
+			region = &sched_info[numa_id].ce_ctx_region[key->mode][key->type];
+			return sched_get_next_pos_rr(region, NULL);
+		}
+	}
+
+	return INVALID_POS;
+}
+
 static handle_t session_sched_init(handle_t h_sched_ctx, void *sched_param)
 {
 	struct wd_sched_ctx *sched_ctx = (struct wd_sched_ctx *)h_sched_ctx;
@@ -211,12 +237,19 @@ static handle_t session_sched_init(handle_t h_sched_ctx, void *sched_param)
 		goto out;
 	}
 
+	if (param->ctx_prop > 0) {
+		/*
+		 * Not a HW task. TODO: initialize and look up the ctxid
+		 * based on the ctx property.
+		 */
+		;
+	}
+
 	skey->sync_ctxid = session_sched_init_ctx(sched_ctx, skey, CTX_MODE_SYNC);
 	skey->async_ctxid = session_sched_init_ctx(sched_ctx, skey, CTX_MODE_ASYNC);
 	if (skey->sync_ctxid == INVALID_POS && skey->async_ctxid == INVALID_POS) {
 		WD_ERR("failed to get valid sync_ctxid or async_ctxid!\n");
 		goto out;
 	}
+	skey->fb_ctxid = session_sched_fb_ctx(sched_ctx, skey, CTX_MODE_SYNC);
 
 	return (handle_t)skey;
 
@@ -237,16 +270,23 @@ static __u32 session_sched_pick_next_ctx(handle_t h_sched_ctx, void *sched_key,
 					  const int sched_mode)
 {
 	struct sched_key *key = (struct sched_key *)sched_key;
+	static int soft_slice;
 
 	if (unlikely(!h_sched_ctx || !key)) {
 		WD_ERR("invalid: sched ctx or key is NULL!\n");
 		return INVALID_POS;
 	}
 
-	/* return in do task */
-	if (sched_mode == CTX_MODE_SYNC)
-		return key->sync_ctxid;
-	return key->async_ctxid;
+	/* Alternate picks between the HW ctx and the soft fallback ctx */
+	if (soft_slice == 0) {
+		soft_slice = 1;
+		if (sched_mode == CTX_MODE_SYNC)
+			return key->sync_ctxid;
+		return key->async_ctxid;
+	}
+
+	soft_slice = 0;
+	return key->fb_ctxid;
 }
 
 static int session_poll_region(struct wd_sched_ctx *sched_ctx, __u32 begin,
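Note: session_sched_pick_next_ctx() now behaves as a two-state toggle
between the HW ctxid and fb_ctxid. A standalone sketch of just the
selection behaviour (hypothetical names, not the UADK API):

	#include <stdio.h>

	enum { HW_CTX, FB_CTX };

	/* Toggle between the hardware ctx and the CE fallback ctx per pick. */
	static int pick_next(void)
	{
		static int soft_slice;

		if (soft_slice == 0) {
			soft_slice = 1;
			return HW_CTX;
		}

		soft_slice = 0;
		return FB_CTX;
	}

	int main(void)
	{
		for (int i = 0; i < 4; i++)
			printf("%s\n", pick_next() == HW_CTX ? "hw" : "fallback");
		/* hw, fallback, hw, fallback */
		return 0;
	}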
+ WD_ERR("debug: call function: %s!\n", __func__); fb_driver->init(NULL, NULL);
 
 	return 0;
@@ -2742,6 +2743,7 @@ static int wd_alg_ce_ctx_init(struct wd_ctx_config *ctx_config, bool fb_flag)
 	__u32 old_num = 0;
 	size_t buf_size;
+ WD_ERR("debug: call function: %s!\n", __func__); if (!fb_flag) { ctx_config->ctx_num = 1; buf_size = sizeof(struct wd_ctx);