This is an automated email from the git hooks/post-receive script. It was generated because a ref change was pushed to the repository containing the project "".
The branch, master has been updated via fd5939c3ae2a2a38c0a1f87428a787ee7ae00789 (commit) via 94e47dc62e340818b91c471788c29af3ba167d96 (commit) via 92e59d9e816a99db318ba24dcb12cb55f2e7392d (commit) via b1812f17ae652f11ce21f26fd24c8fd27818339b (commit) via 23102db002f522cc90d1b616e2725d21e525b1fc (commit) via 049d80427d0145a3c1738d28ba595717ae43d5c2 (commit) via 46d507adef3902a26b7e311506437211e7417a10 (commit) via e7ad8003e34195a3900e1dd3d3a93235896d7628 (commit) via 294856cc30d48d57e12485076bae49da36d346ed (commit) via 6b79ac4b1640e8050b076ba0ecb590cc297320b0 (commit) via 3cb35813da911a94eef6e07ae71ce0f5f325ebd8 (commit) via 3aad0e2ce0e5901fd49e50e26ac7d762c2b9a6aa (commit) via 98eb7327113fbd33a8e5448406e8f47d8d0ad5fb (commit) via d64232f45abae8d4f1222313ce44532cc26e2336 (commit) from 536cce998e84a559e125b4741d00f2a760a0d575 (commit)
Those revisions listed above that are new to this repository have not appeared on any other notification email; so we list those revisions in full, below.
- Log ----------------------------------------------------------------- commit fd5939c3ae2a2a38c0a1f87428a787ee7ae00789 Author: Matias Elo matias.elo@nokia.com Date: Fri Nov 23 14:03:53 2018 +0200
linux-gen: traffic_mngr: allocate all global data from shm
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c index e29cb3cd..23982dce 100644 --- a/platform/linux-generic/odp_traffic_mngr.c +++ b/platform/linux-generic/odp_traffic_mngr.c @@ -46,7 +46,7 @@ static const pkt_desc_t EMPTY_PKT_DESC = { .word = 0 }; #define MAX_PRIORITIES ODP_TM_MAX_PRIORITIES #define NUM_SHAPER_COLORS ODP_NUM_SHAPER_COLORS
-static tm_prop_t basic_prop_tbl[MAX_PRIORITIES][NUM_SHAPER_COLORS] = { +static const tm_prop_t basic_prop_tbl[MAX_PRIORITIES][NUM_SHAPER_COLORS] = { [0] = { [ODP_TM_SHAPER_GREEN] = { 0, DECR_BOTH }, [ODP_TM_SHAPER_YELLOW] = { 0, DECR_BOTH }, @@ -106,6 +106,8 @@ typedef struct { } profile_tbl_t;
typedef struct { + tm_system_t system[ODP_TM_MAX_NUM_SYSTEMS]; + struct { tm_system_group_t group[ODP_TM_MAX_NUM_SYSTEMS]; odp_ticketlock_t lock; @@ -121,20 +123,24 @@ typedef struct {
profile_tbl_t profile_tbl;
- odp_shm_t shm; -} tm_global_t; + odp_ticketlock_t create_lock; + odp_ticketlock_t profile_lock; + odp_barrier_t first_enq;
-static tm_global_t *tm_glb; + int main_thread_cpu; + int cpu_num;
-/* TM systems table. */ -static tm_system_t odp_tm_systems[ODP_TM_MAX_NUM_SYSTEMS]; + /* Service threads */ + uint64_t busy_wait_counter; + odp_bool_t main_loop_running; + odp_atomic_u64_t atomic_request_cnt; + odp_atomic_u64_t currently_serving_cnt; + odp_atomic_u64_t atomic_done_cnt;
-static odp_ticketlock_t tm_create_lock; -static odp_ticketlock_t tm_profile_lock; -static odp_barrier_t tm_first_enq; + odp_shm_t shm; +} tm_global_t;
-static int g_main_thread_cpu = -1; -static int g_tm_cpu_num; +static tm_global_t *tm_glb;
/* Forward function declarations. */ static void tm_queue_cnts_decrement(tm_system_t *tm_system, @@ -472,8 +478,8 @@ static tm_system_t *tm_system_alloc(void)
/* Find an open slot in the odp_tm_systems array. */ for (tm_idx = 0; tm_idx < ODP_TM_MAX_NUM_SYSTEMS; tm_idx++) { - if (odp_tm_systems[tm_idx].status == TM_STATUS_FREE) { - tm_system = &odp_tm_systems[tm_idx]; + if (tm_glb->system[tm_idx].status == TM_STATUS_FREE) { + tm_system = &tm_glb->system[tm_idx]; memset(tm_system, 0, sizeof(tm_system_t)); tm_system->tm_idx = tm_idx; tm_system->status = TM_STATUS_RESERVED; @@ -486,7 +492,7 @@ static tm_system_t *tm_system_alloc(void)
static void tm_system_free(tm_system_t *tm_system) { - odp_tm_systems[tm_system->tm_idx].status = TM_STATUS_FREE; + tm_glb->system[tm_system->tm_idx].status = TM_STATUS_FREE; }
static void *tm_common_profile_create(const char *name, @@ -2373,31 +2379,24 @@ static int tm_process_expired_timers(tm_system_t *tm_system, return work_done; }
-static volatile uint64_t busy_wait_counter; - -static odp_bool_t main_loop_running; -static odp_atomic_u64_t atomic_request_cnt; -static odp_atomic_u64_t currently_serving_cnt; -static odp_atomic_u64_t atomic_done_cnt; - static void busy_wait(uint32_t iterations) { uint32_t cnt;
for (cnt = 1; cnt <= iterations; cnt++) - busy_wait_counter++; + tm_glb->busy_wait_counter++; }
static void signal_request(void) { - uint64_t my_request_num, serving_cnt; + uint64_t request_num, serving;
- my_request_num = odp_atomic_fetch_inc_u64(&atomic_request_cnt) + 1; + request_num = odp_atomic_fetch_inc_u64(&tm_glb->atomic_request_cnt) + 1;
- serving_cnt = odp_atomic_load_u64(&currently_serving_cnt); - while (serving_cnt != my_request_num) { + serving = odp_atomic_load_u64(&tm_glb->currently_serving_cnt); + while (serving != request_num) { busy_wait(100); - serving_cnt = odp_atomic_load_u64(&currently_serving_cnt); + serving = odp_atomic_load_u64(&tm_glb->currently_serving_cnt); } }
@@ -2405,26 +2404,26 @@ static void check_for_request(void) { uint64_t request_num, serving_cnt, done_cnt;
- request_num = odp_atomic_load_u64(&atomic_request_cnt); - serving_cnt = odp_atomic_load_u64(&currently_serving_cnt); + request_num = odp_atomic_load_u64(&tm_glb->atomic_request_cnt); + serving_cnt = odp_atomic_load_u64(&tm_glb->currently_serving_cnt); if (serving_cnt == request_num) return;
/* Signal the other requesting thread to proceed and then * wait for their done indication */ - odp_atomic_inc_u64(&currently_serving_cnt); + odp_atomic_inc_u64(&tm_glb->currently_serving_cnt); busy_wait(100);
- done_cnt = odp_atomic_load_u64(&atomic_done_cnt); + done_cnt = odp_atomic_load_u64(&tm_glb->atomic_done_cnt); while (done_cnt != request_num) { busy_wait(100); - done_cnt = odp_atomic_load_u64(&atomic_done_cnt); + done_cnt = odp_atomic_load_u64(&tm_glb->atomic_done_cnt); } }
static void signal_request_done(void) { - odp_atomic_inc_u64(&atomic_done_cnt); + odp_atomic_inc_u64(&tm_glb->atomic_done_cnt); }
static int thread_affinity_get(odp_cpumask_t *odp_cpu_mask) @@ -2470,7 +2469,7 @@ static void *tm_system_thread(void *arg)
/* Wait here until we have seen the first enqueue operation. */ odp_barrier_wait(&tm_group->tm_group_barrier); - main_loop_running = true; + tm_glb->main_loop_running = true;
destroying = odp_atomic_load_u64(&tm_system->destroying);
@@ -2674,7 +2673,7 @@ static int affinitize_main_thread(void) * just record this value and return. */ cpu_count = odp_cpumask_count(&odp_cpu_mask); if (cpu_count == 1) { - g_main_thread_cpu = odp_cpumask_first(&odp_cpu_mask); + tm_glb->main_thread_cpu = odp_cpumask_first(&odp_cpu_mask); return 0; } else if (cpu_count == 0) { return -1; @@ -2686,7 +2685,7 @@ static int affinitize_main_thread(void) CPU_SET(cpu_num, &linux_cpu_set); rc = sched_setaffinity(0, sizeof(cpu_set_t), &linux_cpu_set); if (rc == 0) - g_main_thread_cpu = cpu_num; + tm_glb->main_thread_cpu = cpu_num; else ODP_DBG("%s sched_setaffinity failed with rc=%d\n", __func__, rc); @@ -2699,32 +2698,32 @@ static uint32_t tm_thread_cpu_select(void) int cpu_count, cpu;
odp_cpumask_default_worker(&odp_cpu_mask, 0); - if ((g_main_thread_cpu != -1) && - odp_cpumask_isset(&odp_cpu_mask, g_main_thread_cpu)) - odp_cpumask_clr(&odp_cpu_mask, g_main_thread_cpu); + if ((tm_glb->main_thread_cpu != -1) && + odp_cpumask_isset(&odp_cpu_mask, tm_glb->main_thread_cpu)) + odp_cpumask_clr(&odp_cpu_mask, tm_glb->main_thread_cpu);
cpu_count = odp_cpumask_count(&odp_cpu_mask); if (cpu_count < 1) { odp_cpumask_all_available(&odp_cpu_mask); - if ((g_main_thread_cpu != -1) && - odp_cpumask_isset(&odp_cpu_mask, g_main_thread_cpu)) + if ((tm_glb->main_thread_cpu != -1) && + odp_cpumask_isset(&odp_cpu_mask, tm_glb->main_thread_cpu)) cpu_count = odp_cpumask_count(&odp_cpu_mask);
if (cpu_count < 1) odp_cpumask_all_available(&odp_cpu_mask); }
- if (g_tm_cpu_num == 0) { + if (tm_glb->cpu_num == 0) { cpu = odp_cpumask_first(&odp_cpu_mask); } else { - cpu = odp_cpumask_next(&odp_cpu_mask, g_tm_cpu_num); + cpu = odp_cpumask_next(&odp_cpu_mask, tm_glb->cpu_num); if (cpu == -1) { - g_tm_cpu_num = 0; + tm_glb->cpu_num = 0; cpu = odp_cpumask_first(&odp_cpu_mask); } }
- g_tm_cpu_num++; + tm_glb->cpu_num++; return cpu; }
@@ -2760,8 +2759,8 @@ static void _odp_tm_group_destroy(_odp_tm_group_t odp_tm_group) rc = pthread_join(tm_group->thread, NULL); ODP_ASSERT(rc == 0); pthread_attr_destroy(&tm_group->attr); - if (g_tm_cpu_num > 0) - g_tm_cpu_num--; + if (tm_glb->cpu_num > 0) + tm_glb->cpu_num--;
odp_ticketlock_lock(&tm_glb->system_group.lock); tm_group->status = TM_STATUS_FREE; @@ -2931,10 +2930,10 @@ odp_tm_t odp_tm_create(const char *name, return ODP_TM_INVALID;
/* Allocate tm_system_t record. */ - odp_ticketlock_lock(&tm_create_lock); + odp_ticketlock_lock(&tm_glb->create_lock); tm_system = tm_system_alloc(); if (!tm_system) { - odp_ticketlock_unlock(&tm_create_lock); + odp_ticketlock_unlock(&tm_glb->create_lock); return ODP_TM_INVALID; }
@@ -2942,7 +2941,7 @@ odp_tm_t odp_tm_create(const char *name, name_tbl_id = _odp_int_name_tbl_add(name, ODP_TM_HANDLE, odp_tm); if (name_tbl_id == ODP_INVALID_NAME) { tm_system_free(tm_system); - odp_ticketlock_unlock(&tm_create_lock); + odp_ticketlock_unlock(&tm_glb->create_lock); return ODP_TM_INVALID; }
@@ -3023,11 +3022,11 @@ odp_tm_t odp_tm_create(const char *name, tm_system->_odp_int_timer_wheel);
tm_system_free(tm_system); - odp_ticketlock_unlock(&tm_create_lock); + odp_ticketlock_unlock(&tm_glb->create_lock); return ODP_TM_INVALID; }
- odp_ticketlock_unlock(&tm_create_lock); + odp_ticketlock_unlock(&tm_glb->create_lock); return odp_tm; }
@@ -3274,7 +3273,7 @@ int odp_tm_shaper_params_update(odp_tm_shaper_t shaper_profile, if (!profile_obj) return -1;
- if (!main_loop_running) { + if (!tm_glb->main_loop_running) { tm_shaper_params_cvt_to(params, profile_obj); return 0; } @@ -3399,7 +3398,7 @@ int odp_tm_sched_params_update(odp_tm_sched_t sched_profile, if (!profile_obj) return -1;
- if (!main_loop_running) { + if (!tm_glb->main_loop_running) { tm_sched_params_cvt_to(params, profile_obj); return 0; } @@ -3497,7 +3496,7 @@ int odp_tm_thresholds_params_update(odp_tm_threshold_t threshold_profile, if (!profile_obj) return -1;
- if (!main_loop_running) { + if (!tm_glb->main_loop_running) { profile_obj->max_pkts = params->enable_max_pkts ? params->max_pkts : 0; profile_obj->max_bytes = @@ -3615,7 +3614,7 @@ int odp_tm_wred_params_update(odp_tm_wred_t wred_profile, if (!wred_params) return -1;
- if (!main_loop_running) { + if (!tm_glb->main_loop_running) { tm_wred_params_cvt_to(params, wred_params); return 0; } @@ -3757,7 +3756,7 @@ int odp_tm_node_destroy(odp_tm_node_t tm_node) if (!tm_node_obj) return -1;
- tm_system = &odp_tm_systems[tm_node_obj->tm_idx]; + tm_system = &tm_glb->system[tm_node_obj->tm_idx]; if (!tm_system) return -1;
@@ -3820,14 +3819,14 @@ int odp_tm_node_shaper_config(odp_tm_node_t tm_node, if (!tm_node_obj) return -1;
- tm_system = &odp_tm_systems[tm_node_obj->tm_idx]; + tm_system = &tm_glb->system[tm_node_obj->tm_idx]; if (!tm_system) return -1;
- odp_ticketlock_lock(&tm_profile_lock); + odp_ticketlock_lock(&tm_glb->profile_lock); tm_shaper_config_set(tm_system, shaper_profile, &tm_node_obj->shaper_obj); - odp_ticketlock_unlock(&tm_profile_lock); + odp_ticketlock_unlock(&tm_glb->profile_lock); return 0; }
@@ -3846,10 +3845,10 @@ int odp_tm_node_sched_config(odp_tm_node_t tm_node, if (!child_tm_node_obj) return -1;
- odp_ticketlock_lock(&tm_profile_lock); + odp_ticketlock_lock(&tm_glb->profile_lock); child_shaper_obj = &child_tm_node_obj->shaper_obj; tm_sched_config_set(child_shaper_obj, sched_profile); - odp_ticketlock_unlock(&tm_profile_lock); + odp_ticketlock_unlock(&tm_glb->profile_lock); return 0; }
@@ -3862,9 +3861,9 @@ int odp_tm_node_threshold_config(odp_tm_node_t tm_node, if (!tm_node_obj) return -1;
- odp_ticketlock_lock(&tm_profile_lock); + odp_ticketlock_lock(&tm_glb->profile_lock); tm_threshold_config_set(&tm_node_obj->tm_wred_node, thresholds_profile); - odp_ticketlock_unlock(&tm_profile_lock); + odp_ticketlock_unlock(&tm_glb->profile_lock); return 0; }
@@ -3883,7 +3882,7 @@ int odp_tm_node_wred_config(odp_tm_node_t tm_node,
wred_node = &tm_node_obj->tm_wred_node;
- odp_ticketlock_lock(&tm_profile_lock); + odp_ticketlock_lock(&tm_glb->profile_lock); rc = 0; if (pkt_color == ODP_PACKET_ALL_COLORS) { for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) @@ -3894,7 +3893,7 @@ int odp_tm_node_wred_config(odp_tm_node_t tm_node, rc = -1; }
- odp_ticketlock_unlock(&tm_profile_lock); + odp_ticketlock_unlock(&tm_glb->profile_lock); return rc; }
@@ -4032,7 +4031,7 @@ int odp_tm_queue_destroy(odp_tm_queue_t tm_queue) if (!tm_queue_obj) return -1;
- tm_system = &odp_tm_systems[tm_queue_obj->tm_idx]; + tm_system = &tm_glb->system[tm_queue_obj->tm_idx]; if (!tm_system) return -1;
@@ -4090,14 +4089,14 @@ int odp_tm_queue_shaper_config(odp_tm_queue_t tm_queue, if (!tm_queue_obj) return -1;
- tm_system = &odp_tm_systems[tm_queue_obj->tm_idx]; + tm_system = &tm_glb->system[tm_queue_obj->tm_idx]; if (!tm_system) return -1;
- odp_ticketlock_lock(&tm_profile_lock); + odp_ticketlock_lock(&tm_glb->profile_lock); tm_shaper_config_set(tm_system, shaper_profile, &tm_queue_obj->shaper_obj); - odp_ticketlock_unlock(&tm_profile_lock); + odp_ticketlock_unlock(&tm_glb->profile_lock); return 0; }
@@ -4117,10 +4116,10 @@ int odp_tm_queue_sched_config(odp_tm_node_t tm_node, if (!child_tm_queue_obj) return -1;
- odp_ticketlock_lock(&tm_profile_lock); + odp_ticketlock_lock(&tm_glb->profile_lock); child_shaper_obj = &child_tm_queue_obj->shaper_obj; tm_sched_config_set(child_shaper_obj, sched_profile); - odp_ticketlock_unlock(&tm_profile_lock); + odp_ticketlock_unlock(&tm_glb->profile_lock); return 0; }
@@ -4134,10 +4133,10 @@ int odp_tm_queue_threshold_config(odp_tm_queue_t tm_queue, if (!tm_queue_obj) return -1;
- odp_ticketlock_lock(&tm_profile_lock); + odp_ticketlock_lock(&tm_glb->profile_lock); ret = tm_threshold_config_set(&tm_queue_obj->tm_wred_node, thresholds_profile); - odp_ticketlock_unlock(&tm_profile_lock); + odp_ticketlock_unlock(&tm_glb->profile_lock); return ret; }
@@ -4156,7 +4155,7 @@ int odp_tm_queue_wred_config(odp_tm_queue_t tm_queue,
wred_node = &tm_queue_obj->tm_wred_node;
- odp_ticketlock_lock(&tm_profile_lock); + odp_ticketlock_lock(&tm_glb->profile_lock); rc = 0; if (pkt_color == ODP_PACKET_ALL_COLORS) { for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) @@ -4167,7 +4166,7 @@ int odp_tm_queue_wred_config(odp_tm_queue_t tm_queue, rc = -1; }
- odp_ticketlock_unlock(&tm_profile_lock); + odp_ticketlock_unlock(&tm_glb->profile_lock); return rc; }
@@ -4232,7 +4231,7 @@ int odp_tm_node_connect(odp_tm_node_t src_tm_node, odp_tm_node_t dst_tm_node) if ((!src_tm_node_obj) || src_tm_node_obj->is_root_node) return -1;
- tm_system = &odp_tm_systems[src_tm_node_obj->tm_idx]; + tm_system = &tm_glb->system[src_tm_node_obj->tm_idx]; if (!tm_system) return -1;
@@ -4302,7 +4301,7 @@ int odp_tm_queue_connect(odp_tm_queue_t tm_queue, odp_tm_node_t dst_tm_node) if (!src_tm_queue_obj) return -1;
- tm_system = &odp_tm_systems[src_tm_queue_obj->tm_idx]; + tm_system = &tm_glb->system[src_tm_queue_obj->tm_idx]; if (!tm_system) return -1;
@@ -4366,7 +4365,7 @@ int odp_tm_enq(odp_tm_queue_t tm_queue, odp_packet_t pkt) if (!tm_queue_obj) return -1;
- tm_system = &odp_tm_systems[tm_queue_obj->tm_idx]; + tm_system = &tm_glb->system[tm_queue_obj->tm_idx]; if (!tm_system) return -1;
@@ -4387,7 +4386,7 @@ int odp_tm_enq_with_cnt(odp_tm_queue_t tm_queue, odp_packet_t pkt) if (!tm_queue_obj) return -1;
- tm_system = &odp_tm_systems[tm_queue_obj->tm_idx]; + tm_system = &tm_glb->system[tm_queue_obj->tm_idx]; if (!tm_system) return -1;
@@ -4653,11 +4652,11 @@ int odp_tm_priority_threshold_config(odp_tm_t odp_tm, if (thresholds_profile == ODP_TM_INVALID) return -1;
- odp_ticketlock_lock(&tm_profile_lock); + odp_ticketlock_lock(&tm_glb->profile_lock); tm_system->priority_info[priority].threshold_params = tm_get_profile_params(thresholds_profile, TM_THRESHOLD_PROFILE); - odp_ticketlock_unlock(&tm_profile_lock); + odp_ticketlock_unlock(&tm_glb->profile_lock); return 0; }
@@ -4670,10 +4669,10 @@ int odp_tm_total_threshold_config(odp_tm_t odp_tm, if (thresholds_profile == ODP_TM_INVALID) return -1;
- odp_ticketlock_lock(&tm_profile_lock); + odp_ticketlock_lock(&tm_glb->profile_lock); tm_system->total_info.threshold_params = tm_get_profile_params( thresholds_profile, TM_THRESHOLD_PROFILE); - odp_ticketlock_unlock(&tm_profile_lock); + odp_ticketlock_unlock(&tm_glb->profile_lock); return 0; }
@@ -4737,21 +4736,22 @@ int odp_tm_init_global(void) memset(tm_glb, 0, sizeof(tm_global_t));
tm_glb->shm = shm; + tm_glb->main_thread_cpu = -1;
odp_ticketlock_init(&tm_glb->queue_obj.lock); odp_ticketlock_init(&tm_glb->node_obj.lock); odp_ticketlock_init(&tm_glb->system_group.lock); - odp_ticketlock_init(&tm_create_lock); - odp_ticketlock_init(&tm_profile_lock); + odp_ticketlock_init(&tm_glb->create_lock); + odp_ticketlock_init(&tm_glb->profile_lock); odp_ticketlock_init(&tm_glb->profile_tbl.sched.lock); odp_ticketlock_init(&tm_glb->profile_tbl.shaper.lock); odp_ticketlock_init(&tm_glb->profile_tbl.threshold.lock); odp_ticketlock_init(&tm_glb->profile_tbl.wred.lock); - odp_barrier_init(&tm_first_enq, 2); + odp_barrier_init(&tm_glb->first_enq, 2);
- odp_atomic_init_u64(&atomic_request_cnt, 0); - odp_atomic_init_u64(¤tly_serving_cnt, 0); - odp_atomic_init_u64(&atomic_done_cnt, 0); + odp_atomic_init_u64(&tm_glb->atomic_request_cnt, 0); + odp_atomic_init_u64(&tm_glb->currently_serving_cnt, 0); + odp_atomic_init_u64(&tm_glb->atomic_done_cnt, 0); return 0; }
commit 94e47dc62e340818b91c471788c29af3ba167d96 Author: Matias Elo matias.elo@nokia.com Date: Fri Nov 23 13:55:55 2018 +0200
linux-gen: traffic_mngr: don't reserve memory if tm is disabled
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c index 2b7e58f7..e29cb3cd 100644 --- a/platform/linux-generic/odp_traffic_mngr.c +++ b/platform/linux-generic/odp_traffic_mngr.c @@ -2918,6 +2918,11 @@ odp_tm_t odp_tm_create(const char *name, uint32_t max_tm_queues, max_sorted_lists; int rc;
+ if (odp_global_ro.init_param.not_used.feat.tm) { + ODP_ERR("TM has been disabled\n"); + return ODP_TM_INVALID; + } + /* If we are using pktio output (usual case) get the first associated * pktout_queue for this pktio and fail if there isn't one. */ @@ -4719,6 +4724,11 @@ int odp_tm_init_global(void) { odp_shm_t shm;
+ if (odp_global_ro.init_param.not_used.feat.tm) { + ODP_DBG("TM disabled\n"); + return 0; + } + shm = odp_shm_reserve("_odp_traffic_mng", sizeof(tm_global_t), 0, 0); if (shm == ODP_SHM_INVALID) return -1; @@ -4747,6 +4757,9 @@ int odp_tm_init_global(void)
int odp_tm_term_global(void) { + if (odp_global_ro.init_param.not_used.feat.tm) + return 0; + if (odp_shm_free(tm_glb->shm)) { ODP_ERR("shm free failed\n"); return -1;
commit 92e59d9e816a99db318ba24dcb12cb55f2e7392d Author: Matias Elo matias.elo@nokia.com Date: Fri Nov 23 13:32:28 2018 +0200
linux-gen: traffic_mngr: allocate profile objects from shm
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h index 587f9d1a..040bb117 100644 --- a/platform/linux-generic/include/odp_traffic_mngr_internal.h +++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h @@ -83,13 +83,6 @@ typedef uint64_t tm_handle_t; #define PF_REACHED_EGRESS 0x40 #define PF_ERROR 0x80
-typedef struct { - uint32_t num_allocd; - uint32_t num_used; - uint32_t num_freed; - void **array_ptrs; /* Ptr to an array of num_allocd void * ptrs. */ -} dynamic_tbl_t; - #define ODP_TM_NUM_PROFILES 4
typedef enum { @@ -115,6 +108,7 @@ typedef struct { _odp_int_name_t name_tbl_id; odp_tm_threshold_t thresholds_profile; uint32_t ref_cnt; + tm_status_t status; } tm_queue_thresholds_t;
typedef struct { @@ -127,6 +121,7 @@ typedef struct { odp_tm_percent_t max_drop_prob; odp_bool_t enable_wred; odp_bool_t use_byte_fullness; + tm_status_t status; } tm_wred_params_t;
typedef struct { @@ -165,6 +160,7 @@ typedef struct { uint32_t ref_cnt; odp_tm_sched_mode_t sched_modes[ODP_TM_MAX_PRIORITIES]; uint16_t inverted_weights[ODP_TM_MAX_PRIORITIES]; + tm_status_t status; } tm_sched_params_t;
typedef enum { @@ -200,6 +196,7 @@ typedef struct { int8_t len_adjust; odp_bool_t dual_rate; odp_bool_t enabled; + tm_status_t status; } tm_shaper_params_t;
typedef enum { NO_CALLBACK, UNDELAY_PKT } tm_shaper_callback_reason_t; diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c index 416e7376..2b7e58f7 100644 --- a/platform/linux-generic/odp_traffic_mngr.c +++ b/platform/linux-generic/odp_traffic_mngr.c @@ -81,6 +81,30 @@ static tm_prop_t basic_prop_tbl[MAX_PRIORITIES][NUM_SHAPER_COLORS] = { [ODP_TM_SHAPER_RED] = { 7, DELAY_PKT } } };
+#define MAX_SHAPER_PROFILES 128 +#define MAX_SCHED_PROFILES 128 +#define MAX_THRESHOLD_PROFILES 128 +#define MAX_WRED_PROFILES 128 + +typedef struct { + struct { + tm_shaper_params_t profile[MAX_SHAPER_PROFILES]; + odp_ticketlock_t lock; + } shaper; + struct { + tm_sched_params_t profile[MAX_SCHED_PROFILES]; + odp_ticketlock_t lock; + } sched; + struct { + tm_queue_thresholds_t profile[MAX_THRESHOLD_PROFILES]; + odp_ticketlock_t lock; + } threshold; + struct { + tm_wred_params_t profile[MAX_WRED_PROFILES]; + odp_ticketlock_t lock; + } wred; +} profile_tbl_t; + typedef struct { struct { tm_system_group_t group[ODP_TM_MAX_NUM_SYSTEMS]; @@ -95,14 +119,13 @@ typedef struct { odp_ticketlock_t lock; } node_obj;
+ profile_tbl_t profile_tbl; + odp_shm_t shm; } tm_global_t;
static tm_global_t *tm_glb;
-/* Profile tables. */ -static dynamic_tbl_t odp_tm_profile_tbls[ODP_TM_NUM_PROFILES]; - /* TM systems table. */ static tm_system_t odp_tm_systems[ODP_TM_MAX_NUM_SYSTEMS];
@@ -245,62 +268,125 @@ static odp_bool_t tm_random_drop(tm_random_data_t *tm_random_data, return drop; }
-static void *alloc_entry_in_dynamic_tbl(dynamic_tbl_t *dynamic_tbl, - uint32_t record_size, - uint32_t *dynamic_idx_ptr) +static void *alloc_entry_in_tbl(profile_tbl_t *profile_tbl, + profile_kind_t profile_kind, + uint32_t *idx) { - uint32_t num_allocd, new_num_allocd, idx; - void **new_array_ptrs, *new_record; - - num_allocd = dynamic_tbl->num_allocd; - if (num_allocd <= dynamic_tbl->num_used) { - /* Need to alloc or realloc the array of ptrs. */ - if (num_allocd <= 32) - new_num_allocd = 64; - else - new_num_allocd = 4 * num_allocd; + uint32_t i;
- new_array_ptrs = malloc(new_num_allocd * sizeof(void *)); - memset(new_array_ptrs, 0, new_num_allocd * sizeof(void *)); + switch (profile_kind) { + case TM_SHAPER_PROFILE: { + tm_shaper_params_t *profile = NULL;
- if (dynamic_tbl->num_used != 0) - memcpy(new_array_ptrs, dynamic_tbl->array_ptrs, - dynamic_tbl->num_used * sizeof(void *)); + odp_ticketlock_lock(&profile_tbl->shaper.lock); + for (i = 0; i < MAX_SHAPER_PROFILES; i++) { + if (profile_tbl->shaper.profile[i].status != + TM_STATUS_FREE) + continue;
- if (dynamic_tbl->array_ptrs) - free(dynamic_tbl->array_ptrs); + profile = &profile_tbl->shaper.profile[i]; + memset(profile, 0, sizeof(tm_shaper_params_t)); + profile->status = TM_STATUS_RESERVED; + *idx = i; + break; + } + odp_ticketlock_unlock(&profile_tbl->shaper.lock); + return profile; + } + case TM_SCHED_PROFILE: { + tm_sched_params_t *profile = NULL; + + odp_ticketlock_lock(&profile_tbl->sched.lock); + for (i = 0; i < MAX_SCHED_PROFILES; i++) { + if (profile_tbl->sched.profile[i].status != + TM_STATUS_FREE) + continue; + + profile = &profile_tbl->sched.profile[i]; + memset(profile, 0, sizeof(tm_sched_params_t)); + profile->status = TM_STATUS_RESERVED; + *idx = i; + break; + } + odp_ticketlock_unlock(&profile_tbl->sched.lock); + return profile; + } + case TM_THRESHOLD_PROFILE: { + tm_queue_thresholds_t *profile = NULL; + + odp_ticketlock_lock(&profile_tbl->threshold.lock); + for (i = 0; i < MAX_THRESHOLD_PROFILES; i++) { + if (profile_tbl->threshold.profile[i].status != + TM_STATUS_FREE) + continue; + + profile = &profile_tbl->threshold.profile[i]; + memset(profile, 0, sizeof(tm_queue_thresholds_t)); + profile->status = TM_STATUS_RESERVED; + *idx = i; + break; + } + odp_ticketlock_unlock(&profile_tbl->threshold.lock); + return profile; + } + case TM_WRED_PROFILE: { + tm_wred_params_t *profile = NULL; + + odp_ticketlock_lock(&profile_tbl->wred.lock); + for (i = 0; i < MAX_WRED_PROFILES; i++) { + if (profile_tbl->wred.profile[i].status != + TM_STATUS_FREE) + continue; + + profile = &profile_tbl->wred.profile[i]; + memset(profile, 0, sizeof(tm_wred_params_t)); + profile->status = TM_STATUS_RESERVED; + *idx = i; + break; + } + odp_ticketlock_unlock(&profile_tbl->wred.lock); + return profile; + } + default: + ODP_ERR("Invalid TM profile\n"); + return NULL;
- dynamic_tbl->num_allocd = new_num_allocd; - dynamic_tbl->array_ptrs = new_array_ptrs; } +}
- idx = dynamic_tbl->num_used; - new_record = malloc(record_size); - memset(new_record, 0, record_size); +static void free_tbl_entry(profile_tbl_t *profile_tbl, + profile_kind_t profile_kind, + uint32_t idx) +{ + switch (profile_kind) { + case TM_SHAPER_PROFILE: + odp_ticketlock_lock(&profile_tbl->shaper.lock); + profile_tbl->shaper.profile[idx].status = TM_STATUS_RESERVED; + odp_ticketlock_unlock(&profile_tbl->shaper.lock); + return;
- dynamic_tbl->array_ptrs[idx] = new_record; - dynamic_tbl->num_used++; - if (dynamic_idx_ptr) - *dynamic_idx_ptr = idx; + case TM_SCHED_PROFILE: + odp_ticketlock_lock(&profile_tbl->sched.lock); + profile_tbl->sched.profile[idx].status = TM_STATUS_RESERVED; + odp_ticketlock_unlock(&profile_tbl->sched.lock); + return;
- return new_record; -} + case TM_THRESHOLD_PROFILE: + odp_ticketlock_lock(&profile_tbl->threshold.lock); + profile_tbl->threshold.profile[idx].status = TM_STATUS_RESERVED; + odp_ticketlock_unlock(&profile_tbl->threshold.lock); + return;
-static void free_dynamic_tbl_entry(dynamic_tbl_t *dynamic_tbl, - uint32_t record_size ODP_UNUSED, - uint32_t dynamic_idx) -{ - void *record; + case TM_WRED_PROFILE: + odp_ticketlock_lock(&profile_tbl->wred.lock); + profile_tbl->wred.profile[idx].status = TM_STATUS_RESERVED; + odp_ticketlock_unlock(&profile_tbl->wred.lock); + return; + + default: + ODP_ERR("Invalid TM profile\n"); + return;
- record = dynamic_tbl->array_ptrs[dynamic_idx]; - if (record) { - free(record); - dynamic_tbl->array_ptrs[dynamic_idx] = NULL; - dynamic_tbl->num_freed++; - if (dynamic_tbl->num_freed == dynamic_tbl->num_used) { - free(dynamic_tbl->array_ptrs); - memset(dynamic_tbl, 0, sizeof(dynamic_tbl_t)); - } } }
@@ -405,35 +491,33 @@ static void tm_system_free(tm_system_t *tm_system)
static void *tm_common_profile_create(const char *name, profile_kind_t profile_kind, - uint32_t object_size, tm_handle_t *profile_handle_ptr, _odp_int_name_t *name_tbl_id_ptr) { _odp_int_name_kind_t handle_kind; _odp_int_name_t name_tbl_id; - dynamic_tbl_t *dynamic_tbl; tm_handle_t profile_handle; - uint32_t dynamic_tbl_idx; + uint32_t idx; void *object_ptr;
- /* Note that alloc_entry_in_dynamic_tbl will zero out all of the memory - * that it allocates, so an additional memset here is unnnecessary. */ - dynamic_tbl = &odp_tm_profile_tbls[profile_kind]; - object_ptr = alloc_entry_in_dynamic_tbl(dynamic_tbl, object_size, - &dynamic_tbl_idx); - if (!object_ptr) + /* Note that alloc_entry_in_tbl will zero out all of the memory that it + * allocates, so an additional memset here is unnecessary. */ + object_ptr = alloc_entry_in_tbl(&tm_glb->profile_tbl, profile_kind, + &idx); + if (!object_ptr) { + ODP_ERR("No free profiles left\n"); return NULL; + }
handle_kind = PROFILE_TO_HANDLE_KIND[profile_kind]; - profile_handle = MAKE_PROFILE_HANDLE(profile_kind, dynamic_tbl_idx); + profile_handle = MAKE_PROFILE_HANDLE(profile_kind, idx); name_tbl_id = ODP_INVALID_NAME;
if ((name != NULL) && (name[0] != '\0')) { name_tbl_id = _odp_int_name_tbl_add(name, handle_kind, profile_handle); if (name_tbl_id == ODP_INVALID_NAME) { - free_dynamic_tbl_entry(dynamic_tbl, object_size, - dynamic_tbl_idx); + free_tbl_entry(&tm_glb->profile_tbl, profile_kind, idx); return NULL; } } @@ -445,20 +529,18 @@ static void *tm_common_profile_create(const char *name, }
static int tm_common_profile_destroy(tm_handle_t profile_handle, - uint32_t object_size, _odp_int_name_t name_tbl_id) { profile_kind_t profile_kind; - dynamic_tbl_t *dynamic_tbl; - uint32_t dynamic_tbl_idx; + uint32_t idx;
if (name_tbl_id != ODP_INVALID_NAME) _odp_int_name_tbl_delete(name_tbl_id);
- profile_kind = GET_PROFILE_KIND(profile_handle); - dynamic_tbl = &odp_tm_profile_tbls[profile_kind]; - dynamic_tbl_idx = GET_TBL_IDX(profile_handle); - free_dynamic_tbl_entry(dynamic_tbl, object_size, dynamic_tbl_idx); + profile_kind = GET_PROFILE_KIND(profile_handle); + idx = GET_TBL_IDX(profile_handle); + free_tbl_entry(&tm_glb->profile_tbl, profile_kind, idx); + return 0; }
@@ -466,16 +548,31 @@ static void *tm_get_profile_params(tm_handle_t profile_handle, profile_kind_t expected_profile_kind) { profile_kind_t profile_kind; - dynamic_tbl_t *dynamic_tbl; - uint32_t dynamic_tbl_idx; + uint32_t idx;
profile_kind = GET_PROFILE_KIND(profile_handle); if (profile_kind != expected_profile_kind) return NULL;
- dynamic_tbl = &odp_tm_profile_tbls[profile_kind]; - dynamic_tbl_idx = GET_TBL_IDX(profile_handle); - return dynamic_tbl->array_ptrs[dynamic_tbl_idx]; + idx = GET_TBL_IDX(profile_handle); + + switch (profile_kind) { + case TM_SHAPER_PROFILE: + return &tm_glb->profile_tbl.shaper.profile[idx]; + + case TM_SCHED_PROFILE: + return &tm_glb->profile_tbl.sched.profile[idx]; + + case TM_THRESHOLD_PROFILE: + return &tm_glb->profile_tbl.threshold.profile[idx]; + + case TM_WRED_PROFILE: + return &tm_glb->profile_tbl.wred.profile[idx]; + + default: + ODP_ERR("Invalid TM profile\n"); + return NULL; + } }
static uint64_t tm_bps_to_rate(uint64_t bps) @@ -3116,7 +3213,6 @@ odp_tm_shaper_t odp_tm_shaper_create(const char *name, _odp_int_name_t name_tbl_id;
profile_obj = tm_common_profile_create(name, TM_SHAPER_PROFILE, - sizeof(tm_shaper_params_t), &shaper_handle, &name_tbl_id); if (!profile_obj) return ODP_TM_INVALID; @@ -3142,7 +3238,6 @@ int odp_tm_shaper_destroy(odp_tm_shaper_t shaper_profile) return -1;
return tm_common_profile_destroy(shaper_profile, - sizeof(tm_shaper_params_t), profile_obj->name_tbl_id); }
@@ -3243,7 +3338,6 @@ odp_tm_sched_t odp_tm_sched_create(const char *name, odp_tm_sched_t sched_handle;
profile_obj = tm_common_profile_create(name, TM_SCHED_PROFILE, - sizeof(tm_sched_params_t), &sched_handle, &name_tbl_id); if (!profile_obj) return ODP_TM_INVALID; @@ -3269,7 +3363,6 @@ int odp_tm_sched_destroy(odp_tm_sched_t sched_profile) return -1;
return tm_common_profile_destroy(sched_profile, - sizeof(tm_sched_params_t), profile_obj->name_tbl_id); }
@@ -3335,7 +3428,6 @@ odp_tm_threshold_t odp_tm_threshold_create(const char *name, _odp_int_name_t name_tbl_id;
profile_obj = tm_common_profile_create(name, TM_THRESHOLD_PROFILE, - sizeof(tm_queue_thresholds_t), &threshold_handle, &name_tbl_id); if (!profile_obj) return ODP_TM_INVALID; @@ -3364,7 +3456,6 @@ int odp_tm_threshold_destroy(odp_tm_threshold_t threshold_profile) return -1;
return tm_common_profile_destroy(threshold_profile, - sizeof(odp_tm_threshold_params_t), threshold_params->name_tbl_id); }
@@ -3462,7 +3553,6 @@ odp_tm_wred_t odp_tm_wred_create(const char *name, odp_tm_wred_params_t *params) _odp_int_name_t name_tbl_id;
profile_obj = tm_common_profile_create(name, TM_WRED_PROFILE, - sizeof(tm_wred_params_t), &wred_handle, &name_tbl_id);
if (!profile_obj) @@ -3489,7 +3579,6 @@ int odp_tm_wred_destroy(odp_tm_wred_t wred_profile) return -1;
return tm_common_profile_destroy(wred_profile, - sizeof(tm_wred_params_t), ODP_INVALID_NAME); }
@@ -4644,6 +4733,10 @@ int odp_tm_init_global(void) odp_ticketlock_init(&tm_glb->system_group.lock); odp_ticketlock_init(&tm_create_lock); odp_ticketlock_init(&tm_profile_lock); + odp_ticketlock_init(&tm_glb->profile_tbl.sched.lock); + odp_ticketlock_init(&tm_glb->profile_tbl.shaper.lock); + odp_ticketlock_init(&tm_glb->profile_tbl.threshold.lock); + odp_ticketlock_init(&tm_glb->profile_tbl.wred.lock); odp_barrier_init(&tm_first_enq, 2);
odp_atomic_init_u64(&atomic_request_cnt, 0);
commit b1812f17ae652f11ce21f26fd24c8fd27818339b Author: Matias Elo matias.elo@nokia.com Date: Thu Nov 22 14:37:28 2018 +0200
linux-gen: traffic_mngr: store input_work_queue inside tm_system_t
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h index ca321ed7..587f9d1a 100644 --- a/platform/linux-generic/include/odp_traffic_mngr_internal.h +++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h @@ -377,7 +377,7 @@ struct tm_system_s { void *trace_buffer; uint32_t next_queue_num; tm_queue_obj_t *queue_num_tbl[ODP_TM_MAX_TM_QUEUES]; - input_work_queue_t *input_work_queue; + input_work_queue_t input_work_queue; tm_queue_cnts_t priority_queue_cnts; tm_queue_cnts_t total_queue_cnts; pkt_desc_t egress_pkt_desc; diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c index f928d468..416e7376 100644 --- a/platform/linux-generic/odp_traffic_mngr.c +++ b/platform/linux-generic/odp_traffic_mngr.c @@ -304,15 +304,11 @@ static void free_dynamic_tbl_entry(dynamic_tbl_t *dynamic_tbl, } }
-static input_work_queue_t *input_work_queue_create(void) +static void input_work_queue_init(input_work_queue_t *input_work_queue) { - input_work_queue_t *input_work_queue; - - input_work_queue = malloc(sizeof(input_work_queue_t)); memset(input_work_queue, 0, sizeof(input_work_queue_t)); odp_atomic_init_u64(&input_work_queue->queue_cnt, 0); odp_ticketlock_init(&input_work_queue->lock); - return input_work_queue; }
static void input_work_queue_destroy(input_work_queue_t *input_work_queue) @@ -322,7 +318,7 @@ static void input_work_queue_destroy(input_work_queue_t *input_work_queue) * stopped new tm_enq() (et al) calls from succeeding. */ odp_ticketlock_lock(&input_work_queue->lock); - free(input_work_queue); + memset(input_work_queue, 0, sizeof(input_work_queue_t)); }
static int input_work_queue_append(tm_system_t *tm_system, @@ -332,7 +328,7 @@ static int input_work_queue_append(tm_system_t *tm_system, input_work_item_t *entry_ptr; uint32_t queue_cnt, tail_idx;
- input_work_queue = tm_system->input_work_queue; + input_work_queue = &tm_system->input_work_queue; queue_cnt = odp_atomic_load_u64(&input_work_queue->queue_cnt); if (INPUT_WORK_RING_SIZE <= queue_cnt) { input_work_queue->enqueue_fail_cnt++; @@ -2373,7 +2369,7 @@ static void *tm_system_thread(void *arg)
tm_system = tm_group->first_tm_system; _odp_int_timer_wheel = tm_system->_odp_int_timer_wheel; - input_work_queue = tm_system->input_work_queue; + input_work_queue = &tm_system->input_work_queue;
/* Wait here until we have seen the first enqueue operation. */ odp_barrier_wait(&tm_group->tm_group_barrier); @@ -2427,7 +2423,7 @@ static void *tm_system_thread(void *arg) /* Advance to the next tm_system in the tm_system_group. */ tm_system = tm_system->next; _odp_int_timer_wheel = tm_system->_odp_int_timer_wheel; - input_work_queue = tm_system->input_work_queue; + input_work_queue = &tm_system->input_work_queue; }
odp_barrier_wait(&tm_system->tm_system_destroy_barrier); @@ -2897,10 +2893,7 @@ odp_tm_t odp_tm_create(const char *name, == _ODP_INT_TIMER_WHEEL_INVALID; }
- if (create_fail == 0) { - tm_system->input_work_queue = input_work_queue_create(); - create_fail |= !tm_system->input_work_queue; - } + input_work_queue_init(&tm_system->input_work_queue);
if (create_fail == 0) { /* Pass any odp_groups or hints to tm_group_attach here. */ @@ -2911,8 +2904,6 @@ odp_tm_t odp_tm_create(const char *name,
if (create_fail) { _odp_int_name_tbl_delete(name_tbl_id); - if (tm_system->input_work_queue) - input_work_queue_destroy(tm_system->input_work_queue);
if (tm_system->_odp_int_sorted_pool != _ODP_INT_SORTED_POOL_INVALID) @@ -2980,7 +2971,7 @@ int odp_tm_destroy(odp_tm_t odp_tm) * allocated by this group. */ _odp_tm_group_remove(tm_system->odp_tm_group, odp_tm);
- input_work_queue_destroy(tm_system->input_work_queue); + input_work_queue_destroy(&tm_system->input_work_queue); _odp_sorted_pool_destroy(tm_system->_odp_int_sorted_pool); _odp_queue_pool_destroy(tm_system->_odp_int_queue_pool); _odp_timer_wheel_destroy(tm_system->_odp_int_timer_wheel); @@ -4600,7 +4591,7 @@ void odp_tm_stats_print(odp_tm_t odp_tm) uint32_t queue_num, max_queue_num;
tm_system = GET_TM_SYSTEM(odp_tm); - input_work_queue = tm_system->input_work_queue; + input_work_queue = &tm_system->input_work_queue;
ODP_PRINT("odp_tm_stats_print - tm_system=0x%" PRIX64 " tm_idx=%u\n", odp_tm, tm_system->tm_idx);
commit 23102db002f522cc90d1b616e2725d21e525b1fc Author: Matias Elo matias.elo@nokia.com Date: Thu Nov 22 14:07:16 2018 +0200
linux-gen: traffic_mngr: store root node inside tm_system_t
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h index 53b5e944..ca321ed7 100644 --- a/platform/linux-generic/include/odp_traffic_mngr_internal.h +++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h @@ -386,7 +386,7 @@ struct tm_system_s { _odp_timer_wheel_t _odp_int_timer_wheel; _odp_int_sorted_pool_t _odp_int_sorted_pool;
- tm_node_obj_t *root_node; + tm_node_obj_t root_node; odp_tm_egress_t egress; odp_tm_requirements_t requirements; odp_tm_capabilities_t capabilities; diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c index 1c8be7a7..f928d468 100644 --- a/platform/linux-generic/odp_traffic_mngr.c +++ b/platform/linux-generic/odp_traffic_mngr.c @@ -404,9 +404,6 @@ static tm_system_t *tm_system_alloc(void)
static void tm_system_free(tm_system_t *tm_system) { - if (tm_system->root_node) - free(tm_system->root_node); - odp_tm_systems[tm_system->tm_idx].status = TM_STATUS_FREE; }
@@ -2456,19 +2453,6 @@ void odp_tm_egress_init(odp_tm_egress_t *egress) memset(egress, 0, sizeof(odp_tm_egress_t)); }
-static tm_node_obj_t *create_dummy_root_node(void) -{ - tm_node_obj_t *tm_node_obj; - - tm_node_obj = malloc(sizeof(tm_node_obj_t)); - if (!tm_node_obj) - return NULL; - - memset(tm_node_obj, 0, sizeof(tm_node_obj_t)); - tm_node_obj->is_root_node = true; - return tm_node_obj; -} - int odp_tm_capabilities(odp_tm_capabilities_t capabilities[] ODP_UNUSED, uint32_t capabilities_size) { @@ -2877,6 +2861,7 @@ odp_tm_t odp_tm_create(const char *name, &tm_system->requirements);
tm_system->next_queue_num = 1; + tm_system->root_node.is_root_node = true;
tm_init_random_data(&tm_system->tm_random_data);
@@ -2912,11 +2897,6 @@ odp_tm_t odp_tm_create(const char *name, == _ODP_INT_TIMER_WHEEL_INVALID; }
- if (create_fail == 0) { - tm_system->root_node = create_dummy_root_node(); - create_fail |= tm_system->root_node == NULL; - } - if (create_fail == 0) { tm_system->input_work_queue = input_work_queue_create(); create_fail |= !tm_system->input_work_queue; @@ -4173,7 +4153,9 @@ int odp_tm_node_connect(odp_tm_node_t src_tm_node, odp_tm_node_t dst_tm_node)
src_tm_wred_node = &src_tm_node_obj->tm_wred_node; if (dst_tm_node == ODP_TM_ROOT) { - src_tm_node_obj->shaper_obj.next_tm_node = tm_system->root_node; + tm_node_obj_t *root_node = &tm_system->root_node; + + src_tm_node_obj->shaper_obj.next_tm_node = root_node; src_tm_wred_node->next_tm_wred_node = NULL; return 0; } @@ -4241,7 +4223,7 @@ int odp_tm_queue_connect(odp_tm_queue_t tm_queue, odp_tm_node_t dst_tm_node)
src_tm_wred_node = &src_tm_queue_obj->tm_wred_node; if (dst_tm_node == ODP_TM_ROOT) { - root_node = tm_system->root_node; + root_node = &tm_system->root_node; src_tm_queue_obj->shaper_obj.next_tm_node = root_node; src_tm_wred_node->next_tm_wred_node = NULL; return 0;
commit 049d80427d0145a3c1738d28ba595717ae43d5c2 Author: Matias Elo matias.elo@nokia.com Date: Thu Nov 22 13:41:55 2018 +0200
linux-gen: traffic_mngr: store tm_queue_obj_t array inside tm_system_t
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h index eb8a3d53..53b5e944 100644 --- a/platform/linux-generic/include/odp_traffic_mngr_internal.h +++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h @@ -376,7 +376,7 @@ struct tm_system_s {
void *trace_buffer; uint32_t next_queue_num; - tm_queue_obj_t **queue_num_tbl; + tm_queue_obj_t *queue_num_tbl[ODP_TM_MAX_TM_QUEUES]; input_work_queue_t *input_work_queue; tm_queue_cnts_t priority_queue_cnts; tm_queue_cnts_t total_queue_cnts; diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c index 0fb6088f..1c8be7a7 100644 --- a/platform/linux-generic/odp_traffic_mngr.c +++ b/platform/linux-generic/odp_traffic_mngr.c @@ -407,9 +407,6 @@ static void tm_system_free(tm_system_t *tm_system) if (tm_system->root_node) free(tm_system->root_node);
- if (tm_system->queue_num_tbl) - free(tm_system->queue_num_tbl); - odp_tm_systems[tm_system->tm_idx].status = TM_STATUS_FREE; }
@@ -2840,7 +2837,7 @@ odp_tm_t odp_tm_create(const char *name, odp_bool_t create_fail; odp_tm_t odp_tm; odp_pktout_queue_t pktout; - uint32_t malloc_len, max_num_queues, max_queued_pkts, max_timers; + uint32_t max_num_queues, max_queued_pkts, max_timers; uint32_t max_tm_queues, max_sorted_lists; int rc;
@@ -2879,9 +2876,6 @@ odp_tm_t odp_tm_create(const char *name, tm_system_capabilities_set(&tm_system->capabilities, &tm_system->requirements);
- malloc_len = max_tm_queues * sizeof(tm_queue_obj_t *); - tm_system->queue_num_tbl = malloc(malloc_len); - memset(tm_system->queue_num_tbl, 0, malloc_len); tm_system->next_queue_num = 1;
tm_init_random_data(&tm_system->tm_random_data);
commit 46d507adef3902a26b7e311506437211e7417a10 Author: Matias Elo matias.elo@nokia.com Date: Thu Nov 22 13:28:08 2018 +0200
linux-gen: traffic_mngr: allocate tm_node_obj_t from shm
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h index 4289e5e0..eb8a3d53 100644 --- a/platform/linux-generic/include/odp_traffic_mngr_internal.h +++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h @@ -307,6 +307,7 @@ struct tm_node_obj_s { uint8_t level; /* Primarily for debugging */ uint8_t tm_idx; uint8_t marked; + tm_status_t status; };
typedef struct { diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c index 35254096..0fb6088f 100644 --- a/platform/linux-generic/odp_traffic_mngr.c +++ b/platform/linux-generic/odp_traffic_mngr.c @@ -90,6 +90,10 @@ typedef struct { tm_queue_obj_t obj[ODP_TM_MAX_TM_QUEUES]; odp_ticketlock_t lock; } queue_obj; + struct { + tm_node_obj_t obj[ODP_TM_MAX_NUM_TM_NODES]; + odp_ticketlock_t lock; + } node_obj;
odp_shm_t shm; } tm_global_t; @@ -126,6 +130,11 @@ static inline tm_queue_obj_t *tm_qobj_from_index(uint32_t queue_id) return &tm_glb->queue_obj.obj[queue_id]; }
+static inline tm_node_obj_t *tm_nobj_from_index(uint32_t node_id) +{ + return &tm_glb->node_obj.obj[node_id]; +} + static int queue_tm_reenq(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr) { odp_tm_queue_t tm_queue = MAKE_ODP_TM_QUEUE(odp_queue_context(queue)); @@ -3582,34 +3591,50 @@ odp_tm_node_t odp_tm_node_create(odp_tm_t odp_tm, tm_schedulers_obj_t *schedulers_obj; _odp_int_name_t name_tbl_id; tm_wred_node_t *tm_wred_node; - tm_node_obj_t *tm_node_obj; + tm_node_obj_t *tm_node_obj = NULL; odp_tm_node_t odp_tm_node; odp_tm_wred_t wred_profile; tm_system_t *tm_system; uint32_t level, num_priorities, priority, color; + uint32_t i;
/* Allocate a tm_node_obj_t record. */ tm_system = GET_TM_SYSTEM(odp_tm); - tm_node_obj = malloc(sizeof(tm_node_obj_t)); - if (!tm_node_obj) - return ODP_TM_INVALID;
- level = params->level; - requirements = &tm_system->requirements.per_level[level]; - num_priorities = requirements->max_priority + 1; + odp_ticketlock_lock(&tm_glb->node_obj.lock);
- odp_tm_node = MAKE_ODP_TM_NODE(tm_node_obj); - name_tbl_id = ODP_INVALID_NAME; - if ((name) && (name[0] != '\0')) { - name_tbl_id = _odp_int_name_tbl_add(name, ODP_TM_NODE_HANDLE, - odp_tm_node); - if (name_tbl_id == ODP_INVALID_NAME) { - free(tm_node_obj); - return ODP_TM_INVALID; + for (i = 0; i < ODP_TM_MAX_NUM_TM_NODES; i++) { + tm_node_obj_t *cur_node_obj = tm_nobj_from_index(i); + + if (cur_node_obj->status != TM_STATUS_FREE) + continue; + + level = params->level; + requirements = &tm_system->requirements.per_level[level]; + num_priorities = requirements->max_priority + 1; + + odp_tm_node = MAKE_ODP_TM_NODE(cur_node_obj); + name_tbl_id = ODP_INVALID_NAME; + if ((name) && (name[0] != '\0')) { + name_tbl_id = _odp_int_name_tbl_add(name, + ODP_TM_NODE_HANDLE, + odp_tm_node); + if (name_tbl_id == ODP_INVALID_NAME) + break; } + tm_node_obj = cur_node_obj; + + memset(tm_node_obj, 0, sizeof(tm_node_obj_t)); + tm_node_obj->status = TM_STATUS_RESERVED; + + break; }
- memset(tm_node_obj, 0, sizeof(tm_node_obj_t)); + odp_ticketlock_unlock(&tm_glb->node_obj.lock); + + if (!tm_node_obj) + return ODP_TM_INVALID; + tm_node_obj->user_context = params->user_context; tm_node_obj->name_tbl_id = name_tbl_id; tm_node_obj->max_fanin = params->max_fanin; @@ -3718,7 +3743,10 @@ int odp_tm_node_destroy(odp_tm_node_t tm_node) return rc; }
- free(tm_node_obj); + odp_ticketlock_lock(&tm_glb->node_obj.lock); + tm_node_obj->status = TM_STATUS_FREE; + odp_ticketlock_unlock(&tm_glb->node_obj.lock); + odp_ticketlock_unlock(&tm_system->tm_system_lock); return 0; } @@ -4645,6 +4673,7 @@ int odp_tm_init_global(void) tm_glb->shm = shm;
odp_ticketlock_init(&tm_glb->queue_obj.lock); + odp_ticketlock_init(&tm_glb->node_obj.lock); odp_ticketlock_init(&tm_glb->system_group.lock); odp_ticketlock_init(&tm_create_lock); odp_ticketlock_init(&tm_profile_lock);
commit e7ad8003e34195a3900e1dd3d3a93235896d7628 Author: Matias Elo matias.elo@nokia.com Date: Thu Nov 22 12:39:26 2018 +0200
linux-gen: traffic_mngr: move tm_wred_node_t inside tm_node_obj_t
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h index 3dc212c2..4289e5e0 100644 --- a/platform/linux-generic/include/odp_traffic_mngr_internal.h +++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h @@ -293,7 +293,7 @@ struct tm_queue_obj_s {
struct tm_node_obj_s { void *user_context; - tm_wred_node_t *tm_wred_node; + tm_wred_node_t tm_wred_node; tm_schedulers_obj_t schedulers_obj; tm_shaper_obj_t *fanin_list_head; tm_shaper_obj_t *fanin_list_tail; diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c index 363ff7e2..35254096 100644 --- a/platform/linux-generic/odp_traffic_mngr.c +++ b/platform/linux-generic/odp_traffic_mngr.c @@ -3594,12 +3594,6 @@ odp_tm_node_t odp_tm_node_create(odp_tm_t odp_tm, if (!tm_node_obj) return ODP_TM_INVALID;
- tm_wred_node = malloc(sizeof(tm_wred_node_t)); - if (!tm_wred_node) { - free(tm_node_obj); - return ODP_TM_INVALID; - } - level = params->level; requirements = &tm_system->requirements.per_level[level]; num_priorities = requirements->max_priority + 1; @@ -3610,21 +3604,20 @@ odp_tm_node_t odp_tm_node_create(odp_tm_t odp_tm, name_tbl_id = _odp_int_name_tbl_add(name, ODP_TM_NODE_HANDLE, odp_tm_node); if (name_tbl_id == ODP_INVALID_NAME) { - free(tm_wred_node); free(tm_node_obj); return ODP_TM_INVALID; } }
memset(tm_node_obj, 0, sizeof(tm_node_obj_t)); - memset(tm_wred_node, 0, sizeof(tm_wred_node_t)); tm_node_obj->user_context = params->user_context; tm_node_obj->name_tbl_id = name_tbl_id; tm_node_obj->max_fanin = params->max_fanin; tm_node_obj->is_root_node = false; tm_node_obj->level = params->level; tm_node_obj->tm_idx = tm_system->tm_idx; - tm_node_obj->tm_wred_node = tm_wred_node; + + tm_wred_node = &tm_node_obj->tm_wred_node; odp_ticketlock_init(&tm_wred_node->tm_wred_node_lock);
schedulers_obj = &tm_node_obj->schedulers_obj; @@ -3698,16 +3691,14 @@ int odp_tm_node_destroy(odp_tm_node_t tm_node) if (shaper_obj->shaper_params != NULL) return -1;
- tm_wred_node = tm_node_obj->tm_wred_node; - if (tm_wred_node != NULL) { - if (tm_wred_node->threshold_params != NULL) - return -1; + tm_wred_node = &tm_node_obj->tm_wred_node; + if (tm_wred_node->threshold_params != NULL) + return -1;
- for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) { - wred_params = tm_wred_node->wred_params[color]; - if (wred_params != NULL) - return -1; - } + for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) { + wred_params = tm_wred_node->wred_params[color]; + if (wred_params != NULL) + return -1; }
/* Now that all of the checks are done, time to so some freeing. */ @@ -3715,9 +3706,6 @@ int odp_tm_node_destroy(odp_tm_node_t tm_node) if (tm_node_obj->name_tbl_id != ODP_INVALID_NAME) _odp_int_name_tbl_delete(tm_node_obj->name_tbl_id);
- if (tm_node_obj->tm_wred_node != NULL) - free(tm_node_obj->tm_wred_node); - schedulers_obj = &tm_node_obj->schedulers_obj; num_priorities = schedulers_obj->num_priorities; for (priority = 0; priority < num_priorities; priority++) { @@ -3784,11 +3772,11 @@ int odp_tm_node_threshold_config(odp_tm_node_t tm_node, tm_node_obj_t *tm_node_obj;
tm_node_obj = GET_TM_NODE_OBJ(tm_node); - if ((!tm_node_obj) || (!tm_node_obj->tm_wred_node)) + if (!tm_node_obj) return -1;
odp_ticketlock_lock(&tm_profile_lock); - tm_threshold_config_set(tm_node_obj->tm_wred_node, thresholds_profile); + tm_threshold_config_set(&tm_node_obj->tm_wred_node, thresholds_profile); odp_ticketlock_unlock(&tm_profile_lock); return 0; } @@ -3806,7 +3794,7 @@ int odp_tm_node_wred_config(odp_tm_node_t tm_node, if (!tm_node_obj) return -1;
- wred_node = tm_node_obj->tm_wred_node; + wred_node = &tm_node_obj->tm_wred_node;
odp_ticketlock_lock(&tm_profile_lock); rc = 0; @@ -4161,7 +4149,7 @@ int odp_tm_node_connect(odp_tm_node_t src_tm_node, odp_tm_node_t dst_tm_node) if (!tm_system) return -1;
- src_tm_wred_node = src_tm_node_obj->tm_wred_node; + src_tm_wred_node = &src_tm_node_obj->tm_wred_node; if (dst_tm_node == ODP_TM_ROOT) { src_tm_node_obj->shaper_obj.next_tm_node = tm_system->root_node; src_tm_wred_node->next_tm_wred_node = NULL; @@ -4172,7 +4160,7 @@ int odp_tm_node_connect(odp_tm_node_t src_tm_node, odp_tm_node_t dst_tm_node) if ((!dst_tm_node_obj) || dst_tm_node_obj->is_root_node) return -1;
- dst_tm_wred_node = dst_tm_node_obj->tm_wred_node; + dst_tm_wred_node = &dst_tm_node_obj->tm_wred_node; if (src_tm_node_obj->tm_idx != dst_tm_node_obj->tm_idx) return -1;
@@ -4203,9 +4191,8 @@ int odp_tm_node_disconnect(odp_tm_node_t src_tm_node) dst_tm_node_obj->current_tm_node_fanin--; }
- src_tm_wred_node = src_tm_node_obj->tm_wred_node; - if (src_tm_wred_node != NULL) - src_tm_wred_node->next_tm_wred_node = NULL; + src_tm_wred_node = &src_tm_node_obj->tm_wred_node; + src_tm_wred_node->next_tm_wred_node = NULL;
src_tm_node_obj->shaper_obj.next_tm_node = NULL; return 0; @@ -4242,7 +4229,7 @@ int odp_tm_queue_connect(odp_tm_queue_t tm_queue, odp_tm_node_t dst_tm_node) if ((!dst_tm_node_obj) || dst_tm_node_obj->is_root_node) return -1;
- dst_tm_wred_node = dst_tm_node_obj->tm_wred_node; + dst_tm_wred_node = &dst_tm_node_obj->tm_wred_node; if (src_tm_queue_obj->tm_idx != dst_tm_node_obj->tm_idx) return -1;
@@ -4275,8 +4262,7 @@ int odp_tm_queue_disconnect(odp_tm_queue_t tm_queue) }
src_tm_wred_node = &src_tm_queue_obj->tm_wred_node; - if (src_tm_wred_node != NULL) - src_tm_wred_node->next_tm_wred_node = NULL; + src_tm_wred_node->next_tm_wred_node = NULL;
src_tm_queue_obj->shaper_obj.next_tm_node = NULL; return 0; @@ -4362,19 +4348,17 @@ int odp_tm_node_info(odp_tm_node_t tm_node, odp_tm_node_info_t *info) if (shaper_params != NULL) info->shaper_profile = shaper_params->shaper_profile;
- tm_wred_node = tm_node_obj->tm_wred_node; - if (tm_wred_node != NULL) { - threshold_params = tm_wred_node->threshold_params; - if (threshold_params != NULL) - info->threshold_profile = - threshold_params->thresholds_profile; + tm_wred_node = &tm_node_obj->tm_wred_node; + threshold_params = tm_wred_node->threshold_params; + if (threshold_params != NULL) + info->threshold_profile = + threshold_params->thresholds_profile;
- for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) { - wred_params = tm_wred_node->wred_params[color]; - if (wred_params != NULL) - info->wred_profile[color] = - wred_params->wred_profile; - } + for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) { + wred_params = tm_wred_node->wred_params[color]; + if (wred_params != NULL) + info->wred_profile[color] = + wred_params->wred_profile; }
return 0; @@ -4538,8 +4522,6 @@ int odp_tm_queue_query(odp_tm_queue_t tm_queue, return -1;
tm_wred_node = &tm_queue_obj->tm_wred_node; - if (!tm_wred_node) - return -1;
/* **TBD** Where do we get the queue_info from. */ queue_info.threshold_params = tm_wred_node->threshold_params;
commit 294856cc30d48d57e12485076bae49da36d346ed Author: Matias Elo matias.elo@nokia.com Date: Thu Nov 22 12:10:23 2018 +0200
linux-gen: traffic_mngr: move tm_schedulers_obj_t inside tm_node_obj_t
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h index 199bfbb6..3dc212c2 100644 --- a/platform/linux-generic/include/odp_traffic_mngr_internal.h +++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h @@ -258,7 +258,7 @@ typedef struct { uint8_t num_priorities; uint8_t highest_priority; uint8_t locked; - tm_sched_state_t sched_states[0]; + tm_sched_state_t sched_states[ODP_TM_MAX_PRIORITIES]; } tm_schedulers_obj_t;
struct tm_queue_obj_s { @@ -294,7 +294,7 @@ struct tm_queue_obj_s { struct tm_node_obj_s { void *user_context; tm_wred_node_t *tm_wred_node; - tm_schedulers_obj_t *schedulers_obj; + tm_schedulers_obj_t schedulers_obj; tm_shaper_obj_t *fanin_list_head; tm_shaper_obj_t *fanin_list_tail; tm_shaper_obj_t shaper_obj; diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c index 630bc42e..363ff7e2 100644 --- a/platform/linux-generic/odp_traffic_mngr.c +++ b/platform/linux-generic/odp_traffic_mngr.c @@ -1332,7 +1332,7 @@ static odp_bool_t tm_propagate_pkt_desc(tm_system_t *tm_system, if (!shaper_change) return false;
- schedulers_obj = tm_node_obj->schedulers_obj; + schedulers_obj = &tm_node_obj->schedulers_obj; prev_sched_pkt = schedulers_obj->out_pkt_desc; sched_was_empty = prev_sched_pkt.queue_num == 0; sched_change = false; @@ -1426,7 +1426,7 @@ static odp_bool_t tm_demote_pkt_desc(tm_system_t *tm_system, if ((!blocked_scheduler) && (!timer_shaper)) return false;
- if (tm_node_obj->schedulers_obj == blocked_scheduler) + if (&tm_node_obj->schedulers_obj == blocked_scheduler) return false;
/* See if this first shaper_obj is delaying the demoted_pkt_desc */ @@ -1452,7 +1452,7 @@ static odp_bool_t tm_demote_pkt_desc(tm_system_t *tm_system, if ((!demoted_pkt_desc) && (!shaper_change)) return false;
- schedulers_obj = tm_node_obj->schedulers_obj; + schedulers_obj = &tm_node_obj->schedulers_obj; prev_sched_pkt = schedulers_obj->out_pkt_desc; sched_was_empty = prev_sched_pkt.queue_num == 0; sched_change = false; @@ -1569,7 +1569,7 @@ static odp_bool_t tm_consume_pkt_desc(tm_system_t *tm_system,
tm_node_obj = shaper_obj->next_tm_node; while (!tm_node_obj->is_root_node) { /* not at egress */ - schedulers_obj = tm_node_obj->schedulers_obj; + schedulers_obj = &tm_node_obj->schedulers_obj; prev_sched_pkt = schedulers_obj->out_pkt_desc; sent_priority = schedulers_obj->highest_priority;
@@ -3586,7 +3586,7 @@ odp_tm_node_t odp_tm_node_create(odp_tm_t odp_tm, odp_tm_node_t odp_tm_node; odp_tm_wred_t wred_profile; tm_system_t *tm_system; - uint32_t level, num_priorities, priority, schedulers_obj_len, color; + uint32_t level, num_priorities, priority, color;
/* Allocate a tm_node_obj_t record. */ tm_system = GET_TM_SYSTEM(odp_tm); @@ -3603,23 +3603,13 @@ odp_tm_node_t odp_tm_node_create(odp_tm_t odp_tm, level = params->level; requirements = &tm_system->requirements.per_level[level]; num_priorities = requirements->max_priority + 1; - schedulers_obj_len = sizeof(tm_schedulers_obj_t) - + (sizeof(tm_sched_state_t) * num_priorities); - schedulers_obj = malloc(schedulers_obj_len); - if (!schedulers_obj) { - free(tm_wred_node); - free(tm_node_obj); - return ODP_TM_INVALID; - }
- memset(schedulers_obj, 0, schedulers_obj_len); odp_tm_node = MAKE_ODP_TM_NODE(tm_node_obj); name_tbl_id = ODP_INVALID_NAME; if ((name) && (name[0] != '\0')) { name_tbl_id = _odp_int_name_tbl_add(name, ODP_TM_NODE_HANDLE, odp_tm_node); if (name_tbl_id == ODP_INVALID_NAME) { - free(schedulers_obj); free(tm_wred_node); free(tm_node_obj); return ODP_TM_INVALID; @@ -3628,7 +3618,6 @@ odp_tm_node_t odp_tm_node_create(odp_tm_t odp_tm,
memset(tm_node_obj, 0, sizeof(tm_node_obj_t)); memset(tm_wred_node, 0, sizeof(tm_wred_node_t)); - memset(schedulers_obj, 0, schedulers_obj_len); tm_node_obj->user_context = params->user_context; tm_node_obj->name_tbl_id = name_tbl_id; tm_node_obj->max_fanin = params->max_fanin; @@ -3636,9 +3625,9 @@ odp_tm_node_t odp_tm_node_create(odp_tm_t odp_tm, tm_node_obj->level = params->level; tm_node_obj->tm_idx = tm_system->tm_idx; tm_node_obj->tm_wred_node = tm_wred_node; - tm_node_obj->schedulers_obj = schedulers_obj; odp_ticketlock_init(&tm_wred_node->tm_wred_node_lock);
+ schedulers_obj = &tm_node_obj->schedulers_obj; schedulers_obj->num_priorities = num_priorities; for (priority = 0; priority < num_priorities; priority++) { sorted_list = _odp_sorted_list_create( @@ -3666,7 +3655,7 @@ odp_tm_node_t odp_tm_node_create(odp_tm_t odp_tm, tm_node_obj->magic_num = TM_NODE_MAGIC_NUM; tm_node_obj->shaper_obj.enclosing_entity = tm_node_obj; tm_node_obj->shaper_obj.in_tm_node_obj = 1; - tm_node_obj->schedulers_obj->enclosing_entity = tm_node_obj; + tm_node_obj->schedulers_obj.enclosing_entity = tm_node_obj;
odp_ticketlock_unlock(&tm_system->tm_system_lock); return odp_tm_node; @@ -3729,21 +3718,18 @@ int odp_tm_node_destroy(odp_tm_node_t tm_node) if (tm_node_obj->tm_wred_node != NULL) free(tm_node_obj->tm_wred_node);
- schedulers_obj = tm_node_obj->schedulers_obj; - if (schedulers_obj != NULL) { - num_priorities = schedulers_obj->num_priorities; - for (priority = 0; priority < num_priorities; priority++) { - sched_state = &schedulers_obj->sched_states[priority]; - sorted_list = sched_state->sorted_list; - sorted_pool = tm_system->_odp_int_sorted_pool; - rc = _odp_sorted_list_destroy(sorted_pool, - sorted_list); - if (rc != 0) - return rc; - } + schedulers_obj = &tm_node_obj->schedulers_obj; + num_priorities = schedulers_obj->num_priorities; + for (priority = 0; priority < num_priorities; priority++) { + sched_state = &schedulers_obj->sched_states[priority]; + sorted_list = sched_state->sorted_list; + sorted_pool = tm_system->_odp_int_sorted_pool; + rc = _odp_sorted_list_destroy(sorted_pool, + sorted_list); + if (rc != 0) + return rc; }
- free(schedulers_obj); free(tm_node_obj); odp_ticketlock_unlock(&tm_system->tm_system_lock); return 0;
commit 6b79ac4b1640e8050b076ba0ecb590cc297320b0 Author: Matias Elo matias.elo@nokia.com Date: Wed Nov 21 13:46:46 2018 +0200
linux-gen: traffic_mngr: allocate tm_system_group_t from shm
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h index 21d95273..199bfbb6 100644 --- a/platform/linux-generic/include/odp_traffic_mngr_internal.h +++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h @@ -415,15 +415,13 @@ struct tm_system_s { * while the input work queue is shared - timers are not. */
struct tm_system_group_s { - tm_system_group_t *prev; - tm_system_group_t *next; - odp_barrier_t tm_group_barrier; tm_system_t *first_tm_system; uint32_t num_tm_systems; uint32_t first_enq; pthread_t thread; pthread_attr_t attr; + tm_status_t status; };
#ifdef __cplusplus diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c index cfa755ee..630bc42e 100644 --- a/platform/linux-generic/odp_traffic_mngr.c +++ b/platform/linux-generic/odp_traffic_mngr.c @@ -82,6 +82,10 @@ static tm_prop_t basic_prop_tbl[MAX_PRIORITIES][NUM_SHAPER_COLORS] = { };
typedef struct { + struct { + tm_system_group_t group[ODP_TM_MAX_NUM_SYSTEMS]; + odp_ticketlock_t lock; + } system_group; struct { tm_queue_obj_t obj[ODP_TM_MAX_TM_QUEUES]; odp_ticketlock_t lock; @@ -98,8 +102,6 @@ static dynamic_tbl_t odp_tm_profile_tbls[ODP_TM_NUM_PROFILES]; /* TM systems table. */ static tm_system_t odp_tm_systems[ODP_TM_MAX_NUM_SYSTEMS];
-static tm_system_group_t *tm_group_list; - static odp_ticketlock_t tm_create_lock; static odp_ticketlock_t tm_profile_lock; static odp_barrier_t tm_first_enq; @@ -2663,35 +2665,9 @@ static int tm_thread_create(tm_system_group_t *tm_group)
return rc; } - -static _odp_tm_group_t _odp_tm_group_create(const char *name ODP_UNUSED) -{ - tm_system_group_t *tm_group, *first_tm_group, *second_tm_group; - - tm_group = malloc(sizeof(tm_system_group_t)); - memset(tm_group, 0, sizeof(tm_system_group_t)); - odp_barrier_init(&tm_group->tm_group_barrier, 2); - - /* Add this group to the tm_group_list linked list. */ - if (tm_group_list == NULL) { - tm_group_list = tm_group; - tm_group->next = tm_group; - tm_group->prev = tm_group; - } else { - first_tm_group = tm_group_list; - second_tm_group = first_tm_group->next; - first_tm_group->next = tm_group; - second_tm_group->prev = tm_group; - tm_group->next = second_tm_group; - tm_group->prev = first_tm_group; - } - - return MAKE_ODP_TM_SYSTEM_GROUP(tm_group); -} - static void _odp_tm_group_destroy(_odp_tm_group_t odp_tm_group) { - tm_system_group_t *tm_group, *prev_tm_group, *next_tm_group; + tm_system_group_t *tm_group; int rc;
tm_group = GET_TM_GROUP(odp_tm_group); @@ -2704,23 +2680,9 @@ static void _odp_tm_group_destroy(_odp_tm_group_t odp_tm_group) if (g_tm_cpu_num > 0) g_tm_cpu_num--;
- /* Remove this group from the tm_group_list linked list. Special case - * when this is the last tm_group in the linked list. */ - prev_tm_group = tm_group->prev; - next_tm_group = tm_group->next; - if (prev_tm_group == next_tm_group) { - ODP_ASSERT(tm_group_list == tm_group); - tm_group_list = NULL; - } else { - prev_tm_group->next = next_tm_group; - next_tm_group->prev = prev_tm_group; - if (tm_group_list == tm_group) - tm_group_list = next_tm_group; - } - - tm_group->prev = NULL; - tm_group->next = NULL; - free(tm_group); + odp_ticketlock_lock(&tm_glb->system_group.lock); + tm_group->status = TM_STATUS_FREE; + odp_ticketlock_unlock(&tm_glb->system_group.lock); }
static int _odp_tm_group_add(_odp_tm_group_t odp_tm_group, odp_tm_t odp_tm) @@ -2792,12 +2754,21 @@ static int _odp_tm_group_remove(_odp_tm_group_t odp_tm_group, odp_tm_t odp_tm) return 0; }
+static void _odp_tm_init_tm_group(tm_system_group_t *tm_group) +{ + memset(tm_group, 0, sizeof(tm_system_group_t)); + + tm_group->status = TM_STATUS_RESERVED; + odp_barrier_init(&tm_group->tm_group_barrier, 2); +} + static int tm_group_attach(odp_tm_t odp_tm) { tm_system_group_t *tm_group, *min_tm_group; _odp_tm_group_t odp_tm_group; odp_cpumask_t all_cpus, worker_cpus; uint32_t total_cpus, avail_cpus; + uint32_t i;
/* If this platform has a small number of cpu's then allocate one * tm_group and assign all tm_system's to this tm_group. Otherwise in @@ -2811,34 +2782,37 @@ static int tm_group_attach(odp_tm_t odp_tm) avail_cpus = odp_cpumask_count(&worker_cpus);
if (total_cpus < 24) { - tm_group = tm_group_list; - odp_tm_group = MAKE_ODP_TM_SYSTEM_GROUP(tm_group); - if (tm_group == NULL) - odp_tm_group = _odp_tm_group_create(""); + tm_group = &tm_glb->system_group.group[0];
- _odp_tm_group_add(odp_tm_group, odp_tm); - return 0; - } + odp_ticketlock_lock(&tm_glb->system_group.lock); + if (tm_group->status == TM_STATUS_FREE) + _odp_tm_init_tm_group(tm_group); + odp_ticketlock_unlock(&tm_glb->system_group.lock);
- /* Manycore case. */ - if ((tm_group_list == NULL) || (avail_cpus > 1)) { - odp_tm_group = _odp_tm_group_create(""); + odp_tm_group = MAKE_ODP_TM_SYSTEM_GROUP(tm_group); _odp_tm_group_add(odp_tm_group, odp_tm); return 0; }
/* Pick a tm_group according to the smallest number of tm_systems. */ - tm_group = tm_group_list; min_tm_group = NULL; - while (tm_group != NULL) { + odp_ticketlock_lock(&tm_glb->system_group.lock); + for (i = 0; i < ODP_TM_MAX_NUM_SYSTEMS && i < avail_cpus; i++) { + tm_group = &tm_glb->system_group.group[i]; + + if (tm_group->status == TM_STATUS_FREE) { + _odp_tm_init_tm_group(tm_group); + min_tm_group = tm_group; + break; + } + if (min_tm_group == NULL) min_tm_group = tm_group; else if (tm_group->num_tm_systems < min_tm_group->num_tm_systems) min_tm_group = tm_group; - - tm_group = tm_group->next; } + odp_ticketlock_unlock(&tm_glb->system_group.lock);
if (min_tm_group == NULL) return -1; @@ -4703,6 +4677,7 @@ int odp_tm_init_global(void) tm_glb->shm = shm;
odp_ticketlock_init(&tm_glb->queue_obj.lock); + odp_ticketlock_init(&tm_glb->system_group.lock); odp_ticketlock_init(&tm_create_lock); odp_ticketlock_init(&tm_profile_lock); odp_barrier_init(&tm_first_enq, 2);
commit 3cb35813da911a94eef6e07ae71ce0f5f325ebd8 Author: Matias Elo matias.elo@nokia.com Date: Thu Nov 22 09:57:19 2018 +0200
linux-gen: traffic_mngr: allocate tm_queue_obj_t from shm
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h index 463759c1..21d95273 100644 --- a/platform/linux-generic/include/odp_traffic_mngr_internal.h +++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h @@ -287,6 +287,7 @@ struct tm_queue_obj_s { uint8_t tm_idx; uint8_t delayed_cnt; uint8_t blocked_cnt; + tm_status_t status; odp_queue_t queue; };
diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c index 1c80a840..cfa755ee 100644 --- a/platform/linux-generic/odp_traffic_mngr.c +++ b/platform/linux-generic/odp_traffic_mngr.c @@ -81,6 +81,17 @@ static tm_prop_t basic_prop_tbl[MAX_PRIORITIES][NUM_SHAPER_COLORS] = { [ODP_TM_SHAPER_RED] = { 7, DELAY_PKT } } };
+typedef struct { + struct { + tm_queue_obj_t obj[ODP_TM_MAX_TM_QUEUES]; + odp_ticketlock_t lock; + } queue_obj; + + odp_shm_t shm; +} tm_global_t; + +static tm_global_t *tm_glb; + /* Profile tables. */ static dynamic_tbl_t odp_tm_profile_tbls[ODP_TM_NUM_PROFILES];
@@ -108,6 +119,11 @@ static odp_bool_t tm_demote_pkt_desc(tm_system_t *tm_system, tm_shaper_obj_t *timer_shaper, pkt_desc_t *demoted_pkt_desc);
+static inline tm_queue_obj_t *tm_qobj_from_index(uint32_t queue_id) +{ + return &tm_glb->queue_obj.obj[queue_id]; +} + static int queue_tm_reenq(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr) { odp_tm_queue_t tm_queue = MAKE_ODP_TM_QUEUE(odp_queue_context(queue)); @@ -3889,70 +3905,84 @@ odp_tm_queue_t odp_tm_queue_create(odp_tm_t odp_tm, odp_tm_queue_params_t *params) { _odp_int_pkt_queue_t _odp_int_pkt_queue; - tm_queue_obj_t *tm_queue_obj; - odp_tm_queue_t odp_tm_queue; + tm_queue_obj_t *queue_obj; + odp_tm_queue_t odp_tm_queue = ODP_TM_INVALID; odp_queue_t queue; odp_tm_wred_t wred_profile; tm_system_t *tm_system; uint32_t color; + uint32_t i;
/* Allocate a tm_queue_obj_t record. */ tm_system = GET_TM_SYSTEM(odp_tm); - tm_queue_obj = malloc(sizeof(tm_queue_obj_t)); - if (!tm_queue_obj) - return ODP_TM_INVALID;
- _odp_int_pkt_queue = _odp_pkt_queue_create( - tm_system->_odp_int_queue_pool); - if (_odp_int_pkt_queue == _ODP_INT_PKT_QUEUE_INVALID) { - free(tm_queue_obj); - return ODP_TM_INVALID; - } + odp_ticketlock_lock(&tm_glb->queue_obj.lock);
- odp_tm_queue = MAKE_ODP_TM_QUEUE(tm_queue_obj); - memset(tm_queue_obj, 0, sizeof(tm_queue_obj_t)); - tm_queue_obj->user_context = params->user_context; - tm_queue_obj->priority = params->priority; - tm_queue_obj->tm_idx = tm_system->tm_idx; - tm_queue_obj->queue_num = tm_system->next_queue_num++; - tm_queue_obj->_odp_int_pkt_queue = _odp_int_pkt_queue; - tm_queue_obj->pkt = ODP_PACKET_INVALID; - odp_ticketlock_init(&tm_queue_obj->tm_wred_node.tm_wred_node_lock); - - queue = odp_queue_create(NULL, NULL); - if (queue == ODP_QUEUE_INVALID) { - free(tm_queue_obj); - return ODP_TM_INVALID; - } + for (i = 0; i < ODP_TM_MAX_TM_QUEUES; i++) { + _odp_int_queue_pool_t int_queue_pool;
- tm_queue_obj->queue = queue; - odp_queue_context_set(queue, tm_queue_obj, sizeof(tm_queue_obj_t)); - queue_fn->set_enq_deq_fn(queue, - queue_tm_reenq, queue_tm_reenq_multi, - NULL, NULL); + queue_obj = tm_qobj_from_index(i);
- tm_system->queue_num_tbl[tm_queue_obj->queue_num - 1] = tm_queue_obj; - odp_ticketlock_lock(&tm_system->tm_system_lock); - if (params->shaper_profile != ODP_TM_INVALID) - tm_shaper_config_set(tm_system, params->shaper_profile, - &tm_queue_obj->shaper_obj); + if (queue_obj->status != TM_STATUS_FREE) + continue;
- if (params->threshold_profile != ODP_TM_INVALID) - tm_threshold_config_set(&tm_queue_obj->tm_wred_node, - params->threshold_profile); + int_queue_pool = tm_system->_odp_int_queue_pool; + _odp_int_pkt_queue = _odp_pkt_queue_create(int_queue_pool); + if (_odp_int_pkt_queue == _ODP_INT_PKT_QUEUE_INVALID) + continue;
- for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) { - wred_profile = params->wred_profile[color]; - if (wred_profile != ODP_TM_INVALID) - tm_wred_config_set(&tm_queue_obj->tm_wred_node, color, - wred_profile); + odp_tm_queue = MAKE_ODP_TM_QUEUE(queue_obj); + memset(queue_obj, 0, sizeof(tm_queue_obj_t)); + queue_obj->user_context = params->user_context; + queue_obj->priority = params->priority; + queue_obj->tm_idx = tm_system->tm_idx; + queue_obj->queue_num = tm_system->next_queue_num++; + queue_obj->_odp_int_pkt_queue = _odp_int_pkt_queue; + queue_obj->pkt = ODP_PACKET_INVALID; + odp_ticketlock_init(&queue_obj->tm_wred_node.tm_wred_node_lock); + + queue = odp_queue_create(NULL, NULL); + if (queue == ODP_QUEUE_INVALID) { + odp_tm_queue = ODP_TM_INVALID; + continue; + } + + queue_obj->queue = queue; + odp_queue_context_set(queue, queue_obj, sizeof(tm_queue_obj_t)); + queue_fn->set_enq_deq_fn(queue, queue_tm_reenq, + queue_tm_reenq_multi, NULL, NULL); + + tm_system->queue_num_tbl[queue_obj->queue_num - 1] = queue_obj; + + odp_ticketlock_lock(&tm_system->tm_system_lock); + + if (params->shaper_profile != ODP_TM_INVALID) + tm_shaper_config_set(tm_system, params->shaper_profile, + &queue_obj->shaper_obj); + + if (params->threshold_profile != ODP_TM_INVALID) + tm_threshold_config_set(&queue_obj->tm_wred_node, + params->threshold_profile); + + for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) { + wred_profile = params->wred_profile[color]; + if (wred_profile != ODP_TM_INVALID) + tm_wred_config_set(&queue_obj->tm_wred_node, + color, wred_profile); + } + + queue_obj->magic_num = TM_QUEUE_MAGIC_NUM; + queue_obj->shaper_obj.enclosing_entity = queue_obj; + queue_obj->shaper_obj.in_tm_node_obj = 0; + + odp_ticketlock_unlock(&tm_system->tm_system_lock); + + queue_obj->status = TM_STATUS_RESERVED; + break; }
- tm_queue_obj->magic_num = TM_QUEUE_MAGIC_NUM; - tm_queue_obj->shaper_obj.enclosing_entity = tm_queue_obj; - tm_queue_obj->shaper_obj.in_tm_node_obj = 0; + odp_ticketlock_unlock(&tm_glb->queue_obj.lock);
- odp_ticketlock_unlock(&tm_system->tm_system_lock); return odp_tm_queue; }
@@ -3984,9 +4014,10 @@ int odp_tm_queue_destroy(odp_tm_queue_t tm_queue)
odp_queue_destroy(tm_queue_obj->queue);
- /* First delete any associated tm_wred_node and then the tm_queue_obj - * itself */ - free(tm_queue_obj); + odp_ticketlock_lock(&tm_glb->queue_obj.lock); + tm_queue_obj->status = TM_STATUS_FREE; + odp_ticketlock_unlock(&tm_glb->queue_obj.lock); + odp_ticketlock_unlock(&tm_system->tm_system_lock); return 0; } @@ -4660,6 +4691,18 @@ void odp_tm_stats_print(odp_tm_t odp_tm)
int odp_tm_init_global(void) { + odp_shm_t shm; + + shm = odp_shm_reserve("_odp_traffic_mng", sizeof(tm_global_t), 0, 0); + if (shm == ODP_SHM_INVALID) + return -1; + + tm_glb = odp_shm_addr(shm); + memset(tm_glb, 0, sizeof(tm_global_t)); + + tm_glb->shm = shm; + + odp_ticketlock_init(&tm_glb->queue_obj.lock); odp_ticketlock_init(&tm_create_lock); odp_ticketlock_init(&tm_profile_lock); odp_barrier_init(&tm_first_enq, 2); @@ -4672,5 +4715,9 @@ int odp_tm_init_global(void)
int odp_tm_term_global(void) { + if (odp_shm_free(tm_glb->shm)) { + ODP_ERR("shm free failed\n"); + return -1; + } return 0; }
commit 3aad0e2ce0e5901fd49e50e26ac7d762c2b9a6aa Author: Matias Elo matias.elo@nokia.com Date: Wed Nov 21 14:14:32 2018 +0200
linux-gen: traffic_mngr: move tm_wred_node_t inside tm_queue_obj_t
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h index eb1e5775..463759c1 100644 --- a/platform/linux-generic/include/odp_traffic_mngr_internal.h +++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h @@ -269,7 +269,7 @@ struct tm_queue_obj_s { uint32_t pkts_dequeued_cnt; uint32_t pkts_consumed_cnt; _odp_int_pkt_queue_t _odp_int_pkt_queue; - tm_wred_node_t *tm_wred_node; + tm_wred_node_t tm_wred_node; odp_packet_t pkt; odp_packet_t sent_pkt; uint32_t timer_seq; diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c index 81117ce9..1c80a840 100644 --- a/platform/linux-generic/odp_traffic_mngr.c +++ b/platform/linux-generic/odp_traffic_mngr.c @@ -1635,7 +1635,7 @@ static odp_bool_t tm_consume_sent_pkt(tm_system_t *tm_system,
pkt_len = sent_pkt_desc->pkt_len; tm_queue_obj->pkts_consumed_cnt++; - tm_queue_cnts_decrement(tm_system, tm_queue_obj->tm_wred_node, + tm_queue_cnts_decrement(tm_system, &tm_queue_obj->tm_wred_node, tm_queue_obj->priority, pkt_len);
/* Get the next pkt in the tm_queue, if there is one. */ @@ -1901,7 +1901,7 @@ static int tm_enqueue(tm_system_t *tm_system, pkt_color = odp_packet_color(pkt); drop_eligible = odp_packet_drop_eligible(pkt);
- initial_tm_wred_node = tm_queue_obj->tm_wred_node; + initial_tm_wred_node = &tm_queue_obj->tm_wred_node; if (drop_eligible) { drop = random_early_discard(tm_system, tm_queue_obj, initial_tm_wred_node, pkt_color); @@ -3890,7 +3890,6 @@ odp_tm_queue_t odp_tm_queue_create(odp_tm_t odp_tm, { _odp_int_pkt_queue_t _odp_int_pkt_queue; tm_queue_obj_t *tm_queue_obj; - tm_wred_node_t *tm_wred_node; odp_tm_queue_t odp_tm_queue; odp_queue_t queue; odp_tm_wred_t wred_profile; @@ -3903,35 +3902,25 @@ odp_tm_queue_t odp_tm_queue_create(odp_tm_t odp_tm, if (!tm_queue_obj) return ODP_TM_INVALID;
- tm_wred_node = malloc(sizeof(tm_wred_node_t)); - if (!tm_wred_node) { - free(tm_queue_obj); - return ODP_TM_INVALID; - } - _odp_int_pkt_queue = _odp_pkt_queue_create( tm_system->_odp_int_queue_pool); if (_odp_int_pkt_queue == _ODP_INT_PKT_QUEUE_INVALID) { - free(tm_wred_node); free(tm_queue_obj); return ODP_TM_INVALID; }
odp_tm_queue = MAKE_ODP_TM_QUEUE(tm_queue_obj); memset(tm_queue_obj, 0, sizeof(tm_queue_obj_t)); - memset(tm_wred_node, 0, sizeof(tm_wred_node_t)); tm_queue_obj->user_context = params->user_context; tm_queue_obj->priority = params->priority; tm_queue_obj->tm_idx = tm_system->tm_idx; tm_queue_obj->queue_num = tm_system->next_queue_num++; - tm_queue_obj->tm_wred_node = tm_wred_node; tm_queue_obj->_odp_int_pkt_queue = _odp_int_pkt_queue; tm_queue_obj->pkt = ODP_PACKET_INVALID; - odp_ticketlock_init(&tm_wred_node->tm_wred_node_lock); + odp_ticketlock_init(&tm_queue_obj->tm_wred_node.tm_wred_node_lock);
queue = odp_queue_create(NULL, NULL); if (queue == ODP_QUEUE_INVALID) { - free(tm_wred_node); free(tm_queue_obj); return ODP_TM_INVALID; } @@ -3949,13 +3938,14 @@ odp_tm_queue_t odp_tm_queue_create(odp_tm_t odp_tm, &tm_queue_obj->shaper_obj);
if (params->threshold_profile != ODP_TM_INVALID) - tm_threshold_config_set(tm_wred_node, + tm_threshold_config_set(&tm_queue_obj->tm_wred_node, params->threshold_profile);
for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) { wred_profile = params->wred_profile[color]; if (wred_profile != ODP_TM_INVALID) - tm_wred_config_set(tm_wred_node, color, wred_profile); + tm_wred_config_set(&tm_queue_obj->tm_wred_node, color, + wred_profile); }
tm_queue_obj->magic_num = TM_QUEUE_MAGIC_NUM; @@ -3996,7 +3986,6 @@ int odp_tm_queue_destroy(odp_tm_queue_t tm_queue)
/* First delete any associated tm_wred_node and then the tm_queue_obj * itself */ - free(tm_queue_obj->tm_wred_node); free(tm_queue_obj); odp_ticketlock_unlock(&tm_system->tm_system_lock); return 0; @@ -4080,7 +4069,7 @@ int odp_tm_queue_threshold_config(odp_tm_queue_t tm_queue, return -1;
odp_ticketlock_lock(&tm_profile_lock); - ret = tm_threshold_config_set(tm_queue_obj->tm_wred_node, + ret = tm_threshold_config_set(&tm_queue_obj->tm_wred_node, thresholds_profile); odp_ticketlock_unlock(&tm_profile_lock); return ret; @@ -4099,7 +4088,7 @@ int odp_tm_queue_wred_config(odp_tm_queue_t tm_queue, if (!tm_queue_obj) return -1;
- wred_node = tm_queue_obj->tm_wred_node; + wred_node = &tm_queue_obj->tm_wred_node;
odp_ticketlock_lock(&tm_profile_lock); rc = 0; @@ -4250,7 +4239,7 @@ int odp_tm_queue_connect(odp_tm_queue_t tm_queue, odp_tm_node_t dst_tm_node) if (!tm_system) return -1;
- src_tm_wred_node = src_tm_queue_obj->tm_wred_node; + src_tm_wred_node = &src_tm_queue_obj->tm_wred_node; if (dst_tm_node == ODP_TM_ROOT) { root_node = tm_system->root_node; src_tm_queue_obj->shaper_obj.next_tm_node = root_node; @@ -4294,7 +4283,7 @@ int odp_tm_queue_disconnect(odp_tm_queue_t tm_queue) dst_tm_node_obj->current_tm_queue_fanin--; }
- src_tm_wred_node = src_tm_queue_obj->tm_wred_node; + src_tm_wred_node = &src_tm_queue_obj->tm_wred_node; if (src_tm_wred_node != NULL) src_tm_wred_node->next_tm_wred_node = NULL;
@@ -4498,7 +4487,7 @@ int odp_tm_queue_info(odp_tm_queue_t tm_queue, odp_tm_queue_info_t *info) if (shaper_params != NULL) info->shaper_profile = shaper_params->shaper_profile;
- tm_wred_node = tm_queue_obj->tm_wred_node; + tm_wred_node = &tm_queue_obj->tm_wred_node; if (tm_wred_node != NULL) { threshold_params = tm_wred_node->threshold_params; if (threshold_params != NULL) @@ -4557,7 +4546,7 @@ int odp_tm_queue_query(odp_tm_queue_t tm_queue, if (!tm_queue_obj) return -1;
- tm_wred_node = tm_queue_obj->tm_wred_node; + tm_wred_node = &tm_queue_obj->tm_wred_node; if (!tm_wred_node) return -1;
commit 98eb7327113fbd33a8e5448406e8f47d8d0ad5fb Author: Matias Elo matias.elo@nokia.com Date: Fri Nov 16 15:53:01 2018 +0200
linux-gen: traffic_mngr: use static array for odp_tm_systems
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h index 770a6490..eb1e5775 100644 --- a/platform/linux-generic/include/odp_traffic_mngr_internal.h +++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h @@ -99,6 +99,11 @@ typedef enum { TM_WRED_PROFILE } profile_kind_t;
+typedef enum { + TM_STATUS_FREE = 0, + TM_STATUS_RESERVED +} tm_status_t; + typedef struct tm_queue_obj_s tm_queue_obj_t; typedef struct tm_node_obj_s tm_node_obj_t;
@@ -396,6 +401,7 @@ struct tm_system_s { uint8_t tm_idx; uint8_t first_enq; odp_bool_t is_idle; + tm_status_t status;
uint64_t shaper_green_cnt; uint64_t shaper_yellow_cnt; diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c index cf2ce83e..81117ce9 100644 --- a/platform/linux-generic/odp_traffic_mngr.c +++ b/platform/linux-generic/odp_traffic_mngr.c @@ -85,7 +85,7 @@ static tm_prop_t basic_prop_tbl[MAX_PRIORITIES][NUM_SHAPER_COLORS] = { static dynamic_tbl_t odp_tm_profile_tbls[ODP_TM_NUM_PROFILES];
/* TM systems table. */ -static tm_system_t *odp_tm_systems[ODP_TM_MAX_NUM_SYSTEMS]; +static tm_system_t odp_tm_systems[ODP_TM_MAX_NUM_SYSTEMS];
static tm_system_group_t *tm_group_list;
@@ -363,11 +363,11 @@ static tm_system_t *tm_system_alloc(void)
/* Find an open slot in the odp_tm_systems array. */ for (tm_idx = 0; tm_idx < ODP_TM_MAX_NUM_SYSTEMS; tm_idx++) { - if (!odp_tm_systems[tm_idx]) { - tm_system = malloc(sizeof(tm_system_t)); + if (odp_tm_systems[tm_idx].status == TM_STATUS_FREE) { + tm_system = &odp_tm_systems[tm_idx]; memset(tm_system, 0, sizeof(tm_system_t)); - odp_tm_systems[tm_idx] = tm_system; tm_system->tm_idx = tm_idx; + tm_system->status = TM_STATUS_RESERVED; return tm_system; } } @@ -383,8 +383,7 @@ static void tm_system_free(tm_system_t *tm_system) if (tm_system->queue_num_tbl) free(tm_system->queue_num_tbl);
- odp_tm_systems[tm_system->tm_idx] = NULL; - free(tm_system); + odp_tm_systems[tm_system->tm_idx].status = TM_STATUS_FREE; }
static void *tm_common_profile_create(const char *name, @@ -3702,7 +3701,7 @@ int odp_tm_node_destroy(odp_tm_node_t tm_node) if (!tm_node_obj) return -1;
- tm_system = odp_tm_systems[tm_node_obj->tm_idx]; + tm_system = &odp_tm_systems[tm_node_obj->tm_idx]; if (!tm_system) return -1;
@@ -3770,7 +3769,7 @@ int odp_tm_node_shaper_config(odp_tm_node_t tm_node, if (!tm_node_obj) return -1;
- tm_system = odp_tm_systems[tm_node_obj->tm_idx]; + tm_system = &odp_tm_systems[tm_node_obj->tm_idx]; if (!tm_system) return -1;
@@ -3978,7 +3977,7 @@ int odp_tm_queue_destroy(odp_tm_queue_t tm_queue) if (!tm_queue_obj) return -1;
- tm_system = odp_tm_systems[tm_queue_obj->tm_idx]; + tm_system = &odp_tm_systems[tm_queue_obj->tm_idx]; if (!tm_system) return -1;
@@ -4036,7 +4035,7 @@ int odp_tm_queue_shaper_config(odp_tm_queue_t tm_queue, if (!tm_queue_obj) return -1;
- tm_system = odp_tm_systems[tm_queue_obj->tm_idx]; + tm_system = &odp_tm_systems[tm_queue_obj->tm_idx]; if (!tm_system) return -1;
@@ -4178,7 +4177,7 @@ int odp_tm_node_connect(odp_tm_node_t src_tm_node, odp_tm_node_t dst_tm_node) if ((!src_tm_node_obj) || src_tm_node_obj->is_root_node) return -1;
- tm_system = odp_tm_systems[src_tm_node_obj->tm_idx]; + tm_system = &odp_tm_systems[src_tm_node_obj->tm_idx]; if (!tm_system) return -1;
@@ -4247,7 +4246,7 @@ int odp_tm_queue_connect(odp_tm_queue_t tm_queue, odp_tm_node_t dst_tm_node) if (!src_tm_queue_obj) return -1;
- tm_system = odp_tm_systems[src_tm_queue_obj->tm_idx]; + tm_system = &odp_tm_systems[src_tm_queue_obj->tm_idx]; if (!tm_system) return -1;
@@ -4312,7 +4311,7 @@ int odp_tm_enq(odp_tm_queue_t tm_queue, odp_packet_t pkt) if (!tm_queue_obj) return -1;
- tm_system = odp_tm_systems[tm_queue_obj->tm_idx]; + tm_system = &odp_tm_systems[tm_queue_obj->tm_idx]; if (!tm_system) return -1;
@@ -4333,7 +4332,7 @@ int odp_tm_enq_with_cnt(odp_tm_queue_t tm_queue, odp_packet_t pkt) if (!tm_queue_obj) return -1;
- tm_system = odp_tm_systems[tm_queue_obj->tm_idx]; + tm_system = &odp_tm_systems[tm_queue_obj->tm_idx]; if (!tm_system) return -1;
commit d64232f45abae8d4f1222313ce44532cc26e2336 Author: Matias Elo matias.elo@nokia.com Date: Wed Nov 21 15:25:38 2018 +0200
abi: traffic_mngr: reduce max defines
Decrease TM maximum defines to reduce memory usage. With the selected values TM SHM usage stays under 10MB.
Signed-off-by: Matias Elo matias.elo@nokia.com Reviewed-by: Bill Fischofer bill.fischofer@linaro.org Signed-off-by: Maxim Uvarov maxim.uvarov@linaro.org
diff --git a/include/odp/api/abi-default/traffic_mngr.h b/include/odp/api/abi-default/traffic_mngr.h index b7b04b83..9c01ef98 100644 --- a/include/odp/api/abi-default/traffic_mngr.h +++ b/include/odp/api/abi-default/traffic_mngr.h @@ -28,7 +28,7 @@ extern "C" { * systems that may be created. On some platforms this might be much more * limited to as little as one hardware TM system. */ -#define ODP_TM_MAX_NUM_SYSTEMS 64 +#define ODP_TM_MAX_NUM_SYSTEMS 8
/** The ODP_TM_MAX_PRIORITIES constant specifies the largest range of * priorities that any TM system can support. All strict priority values MUST @@ -57,7 +57,7 @@ extern "C" { /** The ODP_TM_MAX_TM_QUEUES constant is the largest number of tm_queues * that can be handled by any one TM system. */ -#define ODP_TM_MAX_TM_QUEUES (16 * 1024 * 1024) +#define ODP_TM_MAX_TM_QUEUES (4 * 1024)
/** The ODP_TM_MAX_NUM_OUTPUTS constant is the largest number of outputs that * can be configured for any one TM system. @@ -67,13 +67,13 @@ extern "C" { /** The ODP_TM_MAX_NUM_TM_NODES constant is the largest number of tm_nodes that * can be in existence for any one TM system. */ -#define ODP_TM_MAX_NUM_TM_NODES (1024 * 1024) +#define ODP_TM_MAX_NUM_TM_NODES (4 * 1024)
/** The ODP_TM_MAX_TM_NODE_FANIN constant is the largest number of fan-in * "inputs" that can be simultaneously connected to a single tm_node. * *TBD* Does this need to be as large as ODP_TM_MAX_TM_QUEUES? *TBD* */ -#define ODP_TM_MAX_TM_NODE_FANIN (1024 * 1024) +#define ODP_TM_MAX_TM_NODE_FANIN (4 * 1024)
/** The ODP_TM_MIN_SHAPER_BW constant is the smallest amount of bandwidth that * can a shaper's peak or commit rate can be set to. It is in units of
-----------------------------------------------------------------------
Summary of changes: include/odp/api/abi-default/traffic_mngr.h | 8 +- .../include/odp_traffic_mngr_internal.h | 37 +- platform/linux-generic/odp_traffic_mngr.c | 958 +++++++++++---------- 3 files changed, 543 insertions(+), 460 deletions(-)
hooks/post-receive