This is an automated email from the git hooks/post-receive script. It was generated because a ref change was pushed to the repository containing the project "".
The branch, master has been updated
       via  0f6ebf74b3f77bf7e02558778e32d317cb983393 (commit)
       via  31f9a83aec500e45576202cc42219bf0673a9790 (commit)
       via  08620451ad0d82b092ac7516673644ab20ebc9bc (commit)
       via  83478c2c04ee939b69e09867f97be88ae5c9e684 (commit)
       via  c5025dfd7c43740aae55d8e69104a251ef1c32ab (commit)
       via  bbe8e532130900bbd6896e7ec5e6828f7e19fa4b (commit)
       via  cd760151e86ac1276906edc83856650b7694162e (commit)
       via  99a96552ddc57fa93af1eb041b71c55e0927fa75 (commit)
       via  664cbd820806256b9f9d44dd879b5fa85c70d40c (commit)
      from  01a3bd80c5e56bdfa1868cfb1f030ca3a834d742 (commit)
Those revisions listed above that are new to this repository have not appeared on any other notification email; so we list those revisions in full, below.
- Log -----------------------------------------------------------------
commit 0f6ebf74b3f77bf7e02558778e32d317cb983393
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Fri Oct 12 10:15:18 2018 +0300
validation: pool: add max num pool tests
Test that pools can be created with the maximum number of events defined in the pool capability. Test that all events can be allocated and freed. Event size is small.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/test/validation/api/pool/pool.c b/test/validation/api/pool/pool.c
index 2795e857..71a1a284 100644
--- a/test/validation/api/pool/pool.c
+++ b/test/validation/api/pool/pool.c
@@ -11,6 +11,7 @@
 #define PKT_LEN 400
 #define PKT_NUM 500
+#define MAX_NUM_DEFAULT (10 * 1024 * 1024)
 static const int default_buffer_size = 1500;
 static const int default_buffer_num = 1000;
@@ -32,7 +33,7 @@ static void pool_test_create_destroy_buffer(void)
         odp_pool_param_init(&param);
-        param.type = ODP_POOL_BUFFER,
+        param.type = ODP_POOL_BUFFER;
         param.buf.size = default_buffer_size;
         param.buf.align = ODP_CACHE_LINE_SIZE;
         param.buf.num = default_buffer_num;
@@ -106,7 +107,7 @@ static void pool_test_alloc_packet(void)
         odp_pool_param_init(&param);
-        param.type = ODP_POOL_PACKET,
+        param.type = ODP_POOL_PACKET;
         param.pkt.num = PKT_NUM;
         param.pkt.len = PKT_LEN;
@@ -145,7 +146,7 @@ static void pool_test_alloc_packet_subparam(void)
         odp_pool_param_init(&param);
-        param.type = ODP_POOL_PACKET,
+        param.type = ODP_POOL_PACKET;
         param.pkt.num = PKT_NUM;
         param.pkt.len = PKT_LEN;
         param.pkt.num_subparam = num_sub;
@@ -270,6 +271,159 @@ static void pool_test_info_data_range(void)
         CU_ASSERT(odp_pool_destroy(pool) == 0);
 }
+static void pool_test_buf_max_num(void)
+{
+        odp_pool_t pool;
+        odp_pool_param_t param;
+        odp_pool_capability_t capa;
+        uint32_t max_num, num, i;
+        odp_shm_t shm;
+        odp_buffer_t *buf;
+
+        CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+        max_num = MAX_NUM_DEFAULT;
+        if (capa.buf.max_num)
+                max_num = capa.buf.max_num;
+
+        odp_pool_param_init(&param);
+
+        param.type = ODP_POOL_BUFFER;
+        param.buf.num = max_num;
+        param.buf.size = 10;
+
+        pool = odp_pool_create("test_buf_max_num", &param);
+
+        CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+        shm = odp_shm_reserve("test_max_num_shm",
+                              max_num * sizeof(odp_buffer_t),
+                              sizeof(odp_buffer_t), 0);
+
+        CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+        buf = odp_shm_addr(shm);
+
+        num = 0;
+        for (i = 0; i < max_num; i++) {
+                buf[num] = odp_buffer_alloc(pool);
+
+                if (buf[num] != ODP_BUFFER_INVALID)
+                        num++;
+        }
+
+        CU_ASSERT(num == max_num);
+
+        for (i = 0; i < num; i++)
+                odp_buffer_free(buf[i]);
+
+        CU_ASSERT(odp_shm_free(shm) == 0);
+        CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void pool_test_pkt_max_num(void)
+{
+        odp_pool_t pool;
+        odp_pool_param_t param;
+        odp_pool_capability_t capa;
+        uint32_t max_num, num, i;
+        odp_shm_t shm;
+        odp_packet_t *pkt;
+        uint32_t len = 10;
+
+        CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+        max_num = MAX_NUM_DEFAULT;
+        if (capa.pkt.max_num)
+                max_num = capa.pkt.max_num;
+
+        odp_pool_param_init(&param);
+
+        param.type = ODP_POOL_PACKET;
+        param.pkt.num = max_num;
+        param.pkt.max_num = max_num;
+        param.pkt.len = len;
+        param.pkt.max_len = len;
+        param.pkt.headroom = 0;
+
+        pool = odp_pool_create("test_packet_max_num", &param);
+
+        CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+        shm = odp_shm_reserve("test_max_num_shm",
+                              max_num * sizeof(odp_packet_t),
+                              sizeof(odp_packet_t), 0);
+
+        CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+        pkt = odp_shm_addr(shm);
+
+        num = 0;
+        for (i = 0; i < max_num; i++) {
+                pkt[num] = odp_packet_alloc(pool, len);
+
+                if (pkt[num] != ODP_PACKET_INVALID)
+                        num++;
+        }
+
+        CU_ASSERT(num == max_num);
+
+        for (i = 0; i < num; i++)
+                odp_packet_free(pkt[i]);
+
+        CU_ASSERT(odp_shm_free(shm) == 0);
+        CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void pool_test_tmo_max_num(void)
+{
+        odp_pool_t pool;
+        odp_pool_param_t param;
+        odp_pool_capability_t capa;
+        uint32_t max_num, num, i;
+        odp_shm_t shm;
+        odp_timeout_t *tmo;
+
+        CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+        max_num = MAX_NUM_DEFAULT;
+        if (capa.tmo.max_num)
+                max_num = capa.tmo.max_num;
+
+        odp_pool_param_init(&param);
+
+        param.type = ODP_POOL_TIMEOUT;
+        param.tmo.num = max_num;
+
+        pool = odp_pool_create("test_tmo_max_num", &param);
+
+        CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+        shm = odp_shm_reserve("test_max_num_shm",
+                              max_num * sizeof(odp_packet_t),
+                              sizeof(odp_packet_t), 0);
+
+        CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+        tmo = odp_shm_addr(shm);
+
+        num = 0;
+        for (i = 0; i < max_num; i++) {
+                tmo[num] = odp_timeout_alloc(pool);
+
+                if (tmo[num] != ODP_TIMEOUT_INVALID)
+                        num++;
+        }
+
+        CU_ASSERT(num == max_num);
+
+        for (i = 0; i < num; i++)
+                odp_timeout_free(tmo[i]);
+
+        CU_ASSERT(odp_shm_free(shm) == 0);
+        CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
 odp_testinfo_t pool_suite[] = {
         ODP_TEST_INFO(pool_test_create_destroy_buffer),
         ODP_TEST_INFO(pool_test_create_destroy_packet),
@@ -279,6 +433,9 @@ odp_testinfo_t pool_suite[] = {
         ODP_TEST_INFO(pool_test_info_packet),
         ODP_TEST_INFO(pool_test_lookup_info_print),
         ODP_TEST_INFO(pool_test_info_data_range),
+        ODP_TEST_INFO(pool_test_buf_max_num),
+        ODP_TEST_INFO(pool_test_pkt_max_num),
+        ODP_TEST_INFO(pool_test_tmo_max_num),
         ODP_TEST_INFO_NULL,
 };
commit 31f9a83aec500e45576202cc42219bf0673a9790
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Fri Oct 12 14:43:38 2018 +0300
linux-gen: pool: reduce max pool size
Reduce the maximum pool size so that a maximum-size (packet) pool requires less than 1 GB of SHM memory. The 1 GB limit (for the default configuration) comes from the maximum SHM reserve size in process mode (single VA) and the limited memory of CI virtual machines.

This define can be increased when pool size options are added to the configuration file.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
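
A rough back-of-the-envelope check of the 1 GB figure above (a sketch only:
the ~4 KB of pool memory per maximum-size packet event assumed below is an
illustrative approximation, not a number taken from the implementation; the
old and new limits are the CONFIG_POOL_MAX_NUM values from the diff below):

    #include <stdio.h>

    int main(void)
    {
            /* Assumed approximate pool memory per max-size packet event */
            unsigned long long per_event = 4ULL * 1024;
            unsigned long long old_max = (1ULL * 1024 * 1024) - 1;
            unsigned long long new_max = (256ULL * 1024) - 1;

            printf("old max pool: ~%llu MB\n", (old_max * per_event) >> 20);
            printf("new max pool: ~%llu MB\n", (new_max * per_event) >> 20);
            return 0;
    }

Under these assumptions the old limit allows a pool of roughly 4 GB, while
the new limit keeps a maximum-size pool just under 1 GB.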
diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h
index d87c457b..a94012ac 100644
--- a/platform/linux-generic/include/odp_config_internal.h
+++ b/platform/linux-generic/include/odp_config_internal.h
@@ -157,7 +157,7 @@ extern "C" {
  * Maximum number of events in a pool. Power of two minus one results optimal
  * memory usage for the ring.
  */
-#define CONFIG_POOL_MAX_NUM ((1 * 1024 * 1024) - 1)
+#define CONFIG_POOL_MAX_NUM ((256 * 1024) - 1)
 /*
  * Maximum number of events in a thread local pool cache
commit 08620451ad0d82b092ac7516673644ab20ebc9bc
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Thu Oct 11 15:03:01 2018 +0300
linux-gen: ring: add reader tail check
A reader tail index is needed to detect when a reader has fallen so far behind that a writer is about to overwrite the data it is still reading.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
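
The writer-side check added in the diff below reduces to an unsigned distance
between free-running 32-bit counters. A minimal standalone sketch of that
condition (illustration only, not the ODP code itself):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Head/tail are free-running 32-bit counters; the slot index is
     * counter & mask. Unsigned subtraction gives the number of slots
     * still in flight, even across counter wrap-around. */
    static bool writer_must_wait(uint32_t new_head, uint32_t reader_tail,
                                 uint32_t ring_size)
    {
            return (uint32_t)(new_head - reader_tail) >= ring_size;
    }

    int main(void)
    {
            /* Ring of 8 slots: writer claims counter 9, slowest reader has
             * released slots only up to counter 1. */
            assert(writer_must_wait(9, 1, 8));   /* would overwrite slot 1 */
            assert(!writer_must_wait(9, 2, 8));  /* a free slot remains */
            return 0;
    }

When the condition is true, the writer spins (odp_cpu_pause()) until the
reader tail moves forward, which is what the new enqueue code below does.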
diff --git a/platform/linux-generic/include/odp_ring_internal.h b/platform/linux-generic/include/odp_ring_internal.h
index 9a637afb..ad2f37ef 100644
--- a/platform/linux-generic/include/odp_ring_internal.h
+++ b/platform/linux-generic/include/odp_ring_internal.h
@@ -23,8 +23,9 @@ extern "C" {
  * Ring stores head and tail counters. Ring indexes are formed from these
  * counters with a mask (mask = ring_size - 1), which requires that ring size
  * must be a power of two. Also ring size must be larger than the maximum
- * number of data items that will be stored on it (there's no check against
- * overwriting). */
+ * number of data items that will be stored on it as write operations are
+ * assumed to succeed eventually (after readers complete their current
+ * operations). */
 typedef struct ODP_ALIGNED_CACHE {
         /* Writer head and tail */
         odp_atomic_u32_t w_head;
@@ -33,6 +34,7 @@ typedef struct ODP_ALIGNED_CACHE {
         /* Reader head and tail */
         odp_atomic_u32_t r_head;
+        odp_atomic_u32_t r_tail;
         uint32_t data[0];
 } ring_t;
@@ -53,6 +55,7 @@ static inline void ring_init(ring_t *ring)
         odp_atomic_init_u32(&ring->w_head, 0);
         odp_atomic_init_u32(&ring->w_tail, 0);
         odp_atomic_init_u32(&ring->r_head, 0);
+        odp_atomic_init_u32(&ring->r_tail, 0);
 }
 /* Dequeue data from the ring head */
@@ -75,12 +78,19 @@ static inline uint32_t ring_deq(ring_t *ring, uint32_t mask, uint32_t *data)
                 new_head = head + 1;
         } while (odp_unlikely(cas_mo_u32(&ring->r_head, &head, new_head,
-                                         __ATOMIC_ACQ_REL,
+                                         __ATOMIC_ACQUIRE,
                                          __ATOMIC_ACQUIRE) == 0));
-        /* Read data. CAS acquire-release ensures that data read
-         * does not move above from here. */
+        /* Read data. */
         *data = ring->data[new_head & mask];
+
+        /* Wait until other readers have updated the tail */
+        while (odp_unlikely(odp_atomic_load_u32(&ring->r_tail) != head))
+                odp_cpu_pause();
+
+        /* Update the tail. Writers acquire it. */
+        odp_atomic_store_rel_u32(&ring->r_tail, new_head);
+
         return 1;
 }
@@ -110,14 +120,20 @@ static inline uint32_t ring_deq_multi(ring_t *ring, uint32_t mask,
                 new_head = head + num;
         } while (odp_unlikely(cas_mo_u32(&ring->r_head, &head, new_head,
-                                         __ATOMIC_ACQ_REL,
+                                         __ATOMIC_ACQUIRE,
                                          __ATOMIC_ACQUIRE) == 0));
-        /* Read data. CAS acquire-release ensures that data read
-         * does not move above from here. */
+        /* Read data. */
         for (i = 0; i < num; i++)
                 data[i] = ring->data[(head + 1 + i) & mask];
+        /* Wait until other readers have updated the tail */
+        while (odp_unlikely(odp_atomic_load_u32(&ring->r_tail) != head))
+                odp_cpu_pause();
+
+        /* Update the tail. Writers acquire it. */
+        odp_atomic_store_rel_u32(&ring->r_tail, new_head);
+
         return num;
 }
@@ -125,16 +141,24 @@ static inline uint32_t ring_deq_multi(ring_t *ring, uint32_t mask,
 static inline void ring_enq(ring_t *ring, uint32_t mask, uint32_t data)
 {
         uint32_t old_head, new_head;
+        uint32_t size = mask + 1;
         /* Reserve a slot in the ring for writing */
         old_head = odp_atomic_fetch_inc_u32(&ring->w_head);
         new_head = old_head + 1;
+        /* Wait for the last reader to finish. This prevents overwrite when
+         * a reader has been left behind (e.g. due to an interrupt) and is
+         * still reading the same slot. */
+        while (odp_unlikely(new_head - odp_atomic_load_acq_u32(&ring->r_tail)
+                            >= size))
+                odp_cpu_pause();
+
         /* Write data */
         ring->data[new_head & mask] = data;
         /* Wait until other writers have updated the tail */
-        while (odp_unlikely(odp_atomic_load_acq_u32(&ring->w_tail) != old_head))
+        while (odp_unlikely(odp_atomic_load_u32(&ring->w_tail) != old_head))
                 odp_cpu_pause();
         /* Release the new writer tail, readers acquire it. */
@@ -146,17 +170,25 @@ static inline void ring_enq_multi(ring_t *ring, uint32_t mask, uint32_t data[],
                                   uint32_t num)
 {
         uint32_t old_head, new_head, i;
+        uint32_t size = mask + 1;
         /* Reserve a slot in the ring for writing */
         old_head = odp_atomic_fetch_add_u32(&ring->w_head, num);
         new_head = old_head + 1;
+        /* Wait for the last reader to finish. This prevents overwrite when
+         * a reader has been left behind (e.g. due to an interrupt) and is
+         * still reading these slots. */
+        while (odp_unlikely(new_head - odp_atomic_load_acq_u32(&ring->r_tail)
+                            >= size))
+                odp_cpu_pause();
+
         /* Write data */
         for (i = 0; i < num; i++)
                 ring->data[(new_head + i) & mask] = data[i];
         /* Wait until other writers have updated the tail */
-        while (odp_unlikely(odp_atomic_load_acq_u32(&ring->w_tail) != old_head))
+        while (odp_unlikely(odp_atomic_load_u32(&ring->w_tail) != old_head))
                 odp_cpu_pause();
         /* Release the new writer tail, readers acquire it. */
commit 83478c2c04ee939b69e09867f97be88ae5c9e684
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Thu Oct 11 15:01:46 2018 +0300
linux-gen: pool: ring size must be larger than num items
Ensure that the ring size is larger than the number of events to be stored in it.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
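
The comment added to CONFIG_POOL_MAX_NUM below notes that a power of two
minus one gives optimal ring memory use: the ring must hold num + 1 entries
rounded up to a power of two, so a limit of 2^20 would force a 2^21-entry
ring, while 2^20 - 1 keeps it at 2^20. A small sketch of that rounding
(roundup_pow2() is a hypothetical stand-in for the implementation's
ROUNDUP_POWER2_U32):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper: smallest power of two >= x (for x > 0) */
    static uint32_t roundup_pow2(uint32_t x)
    {
            uint32_t p = 1;

            while (p < x)
                    p <<= 1;
            return p;
    }

    int main(void)
    {
            uint32_t even = 1024 * 1024;   /* 2^20 events */
            uint32_t minus_one = even - 1; /* (2^20) - 1 events */

            printf("%u events -> ring size %u\n", even,
                   roundup_pow2(even + 1));      /* 2097152 */
            printf("%u events -> ring size %u\n", minus_one,
                   roundup_pow2(minus_one + 1)); /* 1048576 */
            return 0;
    }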
diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h
index bfe203bf..d87c457b 100644
--- a/platform/linux-generic/include/odp_config_internal.h
+++ b/platform/linux-generic/include/odp_config_internal.h
@@ -154,9 +154,10 @@ extern "C" {
 #define CONFIG_BURST_SIZE 32
 /*
- * Maximum number of events in a pool
+ * Maximum number of events in a pool. Power of two minus one results optimal
+ * memory usage for the ring.
  */
-#define CONFIG_POOL_MAX_NUM (1 * 1024 * 1024)
+#define CONFIG_POOL_MAX_NUM ((1 * 1024 * 1024) - 1)
 /*
  * Maximum number of events in a thread local pool cache
diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h
index df02fdaf..2696e8ee 100644
--- a/platform/linux-generic/include/odp_pool_internal.h
+++ b/platform/linux-generic/include/odp_pool_internal.h
@@ -38,7 +38,7 @@ typedef struct ODP_ALIGNED_CACHE {
         ring_t hdr;
         /* Ring data: buffer handles */
-        uint32_t buf[CONFIG_POOL_MAX_NUM];
+        uint32_t buf[CONFIG_POOL_MAX_NUM + 1];
 } pool_ring_t;
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c
index 5bb5bc63..7a4a9eb9 100644
--- a/platform/linux-generic/odp_pool.c
+++ b/platform/linux-generic/odp_pool.c
@@ -471,10 +471,11 @@ static odp_pool_t pool_create(const char *name, odp_pool_param_t *params,
                                              FIRST_HP_SIZE - 1) / FIRST_HP_SIZE);
         }
-        if (num <= RING_SIZE_MIN)
+        /* Ring size must be larger than the number of items stored */
+        if (num + 1 <= RING_SIZE_MIN)
                 ring_size = RING_SIZE_MIN;
         else
-                ring_size = ROUNDUP_POWER2_U32(num);
+                ring_size = ROUNDUP_POWER2_U32(num + 1);
         pool->ring_mask = ring_size - 1;
         pool->num = num;
commit c5025dfd7c43740aae55d8e69104a251ef1c32ab
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Thu Oct 11 09:33:49 2018 +0300
linux-gen: sched: remove unnecessary queue null index
The ring no longer uses a special null index, so initializing queue indexes to the null index is no longer needed.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c
index f9f45670..49d9c25c 100644
--- a/platform/linux-generic/odp_schedule_basic.c
+++ b/platform/linux-generic/odp_schedule_basic.c
@@ -69,17 +69,11 @@ ODP_STATIC_ASSERT((ODP_SCHED_PRIO_NORMAL > 0) &&
 /* Maximum pktin index. Needs to fit into 8 bits. */
 #define MAX_PKTIN_INDEX 255
-/* Not a valid index */
-#define NULL_INDEX ((uint32_t)-1)
-
 /* Maximum priority queue ring size. A ring must be large enough to store all
  * queues in the worst case (all queues are scheduled, have the same priority
  * and no spreading). */
 #define MAX_RING_SIZE ODP_CONFIG_QUEUES
-/* Priority queue empty, not a valid queue index. */
-#define PRIO_QUEUE_EMPTY NULL_INDEX
-
 /* For best performance, the number of queues should be a power of two. */
 ODP_STATIC_ASSERT(CHECK_IS_POWER2(ODP_CONFIG_QUEUES),
                   "Number_of_queues_is_not_power_of_two");
@@ -329,8 +323,6 @@ static void sched_local_init(void)
         sched_local.thr = odp_thread_id();
         sched_local.sync_ctx = NO_SYNC_CONTEXT;
         sched_local.stash.queue = ODP_QUEUE_INVALID;
-        sched_local.stash.qi = PRIO_QUEUE_EMPTY;
-        sched_local.ordered.src_queue = NULL_INDEX;
         spread = prio_spread_index(sched_local.thr);
@@ -380,15 +372,9 @@ static int schedule_init_global(void)
         for (i = 0; i < NUM_PRIO; i++) {
                 for (j = 0; j < MAX_SPREAD; j++) {
                         prio_queue_t *prio_q;
-                        int k;
                         prio_q = &sched->prio_q[grp][i][j];
                         ring_init(&prio_q->ring);
-
-                        for (k = 0; k < MAX_RING_SIZE; k++) {
-                                prio_q->queue_index[k] =
-                                        PRIO_QUEUE_EMPTY;
-                        }
                 }
         }
 }
@@ -1177,8 +1163,7 @@ static void schedule_order_lock(uint32_t lock_index)
         queue_index = sched_local.ordered.src_queue;
-        ODP_ASSERT(queue_index != NULL_INDEX &&
-                   lock_index <= sched->queue[queue_index].order_lock_count &&
+        ODP_ASSERT(lock_index <= sched->queue[queue_index].order_lock_count &&
                    !sched_local.ordered.lock_called.u8[lock_index]);
         ord_lock = &sched->order[queue_index].lock[lock_index];
@@ -1207,8 +1192,7 @@ static void schedule_order_unlock(uint32_t lock_index)
         queue_index = sched_local.ordered.src_queue;
-        ODP_ASSERT(queue_index != NULL_INDEX &&
-                   lock_index <= sched->queue[queue_index].order_lock_count);
+        ODP_ASSERT(lock_index <= sched->queue[queue_index].order_lock_count);
         ord_lock = &sched->order[queue_index].lock[lock_index];
commit bbe8e532130900bbd6896e7ec5e6828f7e19fa4b
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Fri Oct 12 10:19:23 2018 +0300
test: sched_latency: honor pool capability limits
Check maximum pool size from pool capability.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/test/performance/odp_sched_latency.c b/test/performance/odp_sched_latency.c
index 64a21983..873738ef 100644
--- a/test/performance/odp_sched_latency.c
+++ b/test/performance/odp_sched_latency.c
@@ -641,11 +641,13 @@ int main(int argc, char *argv[])
         odph_odpthread_params_t thr_params;
         odp_cpumask_t cpumask;
         odp_pool_t pool;
+        odp_pool_capability_t pool_capa;
         odp_pool_param_t params;
         odp_shm_t shm;
         test_globals_t *globals;
         test_args_t args;
         char cpumaskstr[ODP_CPUMASK_STR_SIZE];
+        uint32_t pool_size;
         int i, j;
         int ret = 0;
         int num_workers = 0;
@@ -706,10 +708,19 @@ int main(int argc, char *argv[])
         /*
          * Create event pool
          */
+        if (odp_pool_capability(&pool_capa)) {
+                LOG_ERR("pool capa failed\n");
+                return -1;
+        }
+
+        pool_size = EVENT_POOL_SIZE;
+        if (pool_capa.buf.max_num && pool_capa.buf.max_num < EVENT_POOL_SIZE)
+                pool_size = pool_capa.buf.max_num;
+
         odp_pool_param_init(&params);
         params.buf.size = sizeof(test_event_t);
         params.buf.align = 0;
-        params.buf.num = EVENT_POOL_SIZE;
+        params.buf.num = pool_size;
         params.type = ODP_POOL_BUFFER;
         pool = odp_pool_create("event_pool", &params);
commit cd760151e86ac1276906edc83856650b7694162e
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Fri Oct 12 16:02:50 2018 +0300
test: scheduling: honor pool capability
Limit pool size to maximum pool capability.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/test/performance/odp_scheduling.c b/test/performance/odp_scheduling.c
index 3b75f635..5eeb6926 100644
--- a/test/performance/odp_scheduling.c
+++ b/test/performance/odp_scheduling.c
@@ -30,7 +30,7 @@
 /* GNU lib C */
 #include <getopt.h>
-#define NUM_MSG (512 * 1024)         /**< Number of msg in pool */
+#define MAX_BUF (512 * 1024)         /**< Maximum pool size */
 #define MAX_ALLOCS 32                /**< Alloc burst size */
 #define QUEUES_PER_PRIO 64           /**< Queue per priority */
 #define NUM_PRIOS 2                  /**< Number of tested priorities */
@@ -813,7 +813,8 @@ int main(int argc, char *argv[])
         odp_instance_t instance;
         odph_odpthread_params_t thr_params;
         odp_queue_capability_t capa;
-        uint32_t num_queues;
+        odp_pool_capability_t pool_capa;
+        uint32_t num_queues, num_buf;
         printf("\nODP example starts\n\n");
@@ -869,11 +870,19 @@ int main(int argc, char *argv[])
         /*
          * Create message pool
          */
+        if (odp_pool_capability(&pool_capa)) {
+                LOG_ERR("Pool capabilities failed.\n");
+                return -1;
+        }
+
+        num_buf = MAX_BUF;
+        if (pool_capa.buf.max_num && pool_capa.buf.max_num < MAX_BUF)
+                num_buf = pool_capa.buf.max_num;
         odp_pool_param_init(&params);
         params.buf.size = sizeof(test_message_t);
         params.buf.align = 0;
-        params.buf.num = NUM_MSG;
+        params.buf.num = num_buf;
         params.type = ODP_POOL_BUFFER;
         pool = odp_pool_create("msg_pool", &params);
commit 99a96552ddc57fa93af1eb041b71c55e0927fa75
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Fri Oct 12 15:56:14 2018 +0300
test: scheduling: fix script to exit with failure status
It seems that some shells did not notice the failure status. Simplify the script to exit on the first failure.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/test/performance/odp_scheduling_run.sh b/test/performance/odp_scheduling_run.sh
index ae3d1c8f..57792276 100755
--- a/test/performance/odp_scheduling_run.sh
+++ b/test/performance/odp_scheduling_run.sh
@@ -17,7 +17,12 @@ run()
         echo odp_scheduling_run starts requesting $1 worker threads
         echo ===============================================
-        $TEST_DIR/odp_scheduling${EXEEXT} -c $1 || ret=1
+        $TEST_DIR/odp_scheduling${EXEEXT} -c $1
+
+        if [ $? -ne 0 ]; then
+                echo odp_scheduling FAILED
+                exit $?
+        fi
 }
 run 1
@@ -26,4 +31,4 @@ run 8
 run 11
 run $ALL
-exit $ret
+exit 0
commit 664cbd820806256b9f9d44dd879b5fa85c70d40c
Author: Petri Savolainen <petri.savolainen@linaro.org>
Date:   Fri Oct 12 11:06:19 2018 +0300
helper: iplookup: check capabilities
Check pool and queue capabilities instead of assuming, e.g., that 1M events can be stored in a queue. Reduce the table defines (pool / queue size requirement) as an easy fix to make the tests pass.
Signed-off-by: Petri Savolainen <petri.savolainen@linaro.org>
Reviewed-by: Bill Fischofer <bill.fischofer@linaro.org>
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
diff --git a/helper/iplookuptable.c b/helper/iplookuptable.c
index 7102eb54..84b4e2cb 100644
--- a/helper/iplookuptable.c
+++ b/helper/iplookuptable.c
@@ -83,9 +83,9 @@ typedef struct trie_node {
 } trie_node_t;
 /** Number of L2\L3 entries(subtrees) per cache cube. */
-#define CACHE_NUM_SUBTREE (1 << 13)
+#define CACHE_NUM_SUBTREE (4 * 1024)
 /** Number of trie nodes per cache cube. */
-#define CACHE_NUM_TRIE (1 << 20)
+#define CACHE_NUM_TRIE (4 * 1024)
 /** @typedef cache_type_t
  *      Cache node type
@@ -187,12 +187,34 @@ cache_alloc_new_pool(
 {
         odp_pool_t pool;
         odp_pool_param_t param;
+        odp_pool_capability_t pool_capa;
         odp_queue_t queue = tbl->free_slots[type];
         odp_buffer_t buffer;
         char pool_name[ODPH_TABLE_NAME_LEN + 8];
         uint32_t size = 0, num = 0;
+        if (odp_pool_capability(&pool_capa)) {
+                ODPH_ERR("pool capa failed\n");
+                return -1;
+        }
+
+        if (pool_capa.buf.max_num) {
+                if (pool_capa.buf.max_num < CACHE_NUM_TRIE ||
+                    pool_capa.buf.max_num < CACHE_NUM_SUBTREE) {
+                        ODPH_ERR("pool size too small\n");
+                        return -1;
+                }
+        }
+
+        if (pool_capa.buf.max_size) {
+                if (pool_capa.buf.max_size < ENTRY_SIZE * ENTRY_NUM_SUBTREE ||
+                    pool_capa.buf.max_size < sizeof(trie_node_t)) {
+                        ODPH_ERR("buffer size too small\n");
+                        return -1;
+                }
+        }
+
         /* Create new pool (new free buffers). */
         odp_pool_param_init(&param);
         param.type = ODP_POOL_BUFFER;
@@ -223,7 +245,11 @@ cache_alloc_new_pool(
         while ((buffer = odp_buffer_alloc(pool)) != ODP_BUFFER_INVALID) {
                 cache_init_buffer(buffer, type, size);
-                odp_queue_enq(queue, odp_buffer_to_event(buffer));
+                if (odp_queue_enq(queue, odp_buffer_to_event(buffer))) {
+                        ODPH_DBG("queue enqueue failed\n");
+                        odp_buffer_free(buffer);
+                        break;
+                }
         }
         tbl->cache_count[type]++;
@@ -449,10 +475,28 @@ odph_table_t odph_iplookup_table_create(const char *name,
         odp_shm_t shm_tbl;
         odp_queue_t queue;
         odp_queue_param_t qparam;
+        odp_queue_capability_t queue_capa;
         unsigned i;
-        uint32_t impl_size, l1_size;
+        uint32_t impl_size, l1_size, queue_size;
         char queue_name[ODPH_TABLE_NAME_LEN + 2];
+        if (odp_queue_capability(&queue_capa)) {
+                ODPH_ERR("queue capa failed\n");
+                return NULL;
+        }
+
+        if (queue_capa.plain.max_size) {
+                if (queue_capa.plain.max_size < CACHE_NUM_TRIE ||
+                    queue_capa.plain.max_size < CACHE_NUM_SUBTREE) {
+                        ODPH_ERR("queue size too small\n");
+                        return NULL;
+                }
+        }
+
+        queue_size = CACHE_NUM_TRIE;
+        if (CACHE_NUM_SUBTREE > CACHE_NUM_TRIE)
+                queue_size = CACHE_NUM_SUBTREE;
+
         /* Check for valid parameters */
         if (strlen(name) == 0) {
                 ODPH_DBG("invalid parameters\n");
@@ -502,6 +546,7 @@ odph_table_t odph_iplookup_table_create(const char *name,
         odp_queue_param_init(&qparam);
         qparam.type = ODP_QUEUE_TYPE_PLAIN;
+        qparam.size = queue_size;
         sprintf(queue_name, "%s_%d", name, i);
         queue = odp_queue_create(queue_name, &qparam);
         if (queue == ODP_QUEUE_INVALID) {
-----------------------------------------------------------------------
Summary of changes:
 helper/iplookuptable.c                             |  53 ++++++-
 .../linux-generic/include/odp_config_internal.h    |   5 +-
 platform/linux-generic/include/odp_pool_internal.h |   2 +-
 platform/linux-generic/include/odp_ring_internal.h |  52 +++++--
 platform/linux-generic/odp_pool.c                  |   5 +-
 platform/linux-generic/odp_schedule_basic.c        |  20 +--
 test/performance/odp_sched_latency.c               |  13 +-
 test/performance/odp_scheduling.c                  |  15 +-
 test/performance/odp_scheduling_run.sh             |   9 +-
 test/validation/api/pool/pool.c                    | 163 ++++++++++++++++++++-
 10 files changed, 291 insertions(+), 46 deletions(-)
hooks/post-receive